const std = @import("std");
const ArrayList = std.ArrayList;
const sdl = @import("./sdl.zig");
const textures = @import("./textures.zig");
const gui = @import("./gui.zig");
const core = @import("core");
const Coord = core.geometry.Coord;
const makeCoord = core.geometry.makeCoord;
const Rect = core.geometry.Rect;
const directionToRotation = core.geometry.directionToRotation;
const InputEngine = @import("./input_engine.zig").InputEngine;
const Button = @import("./input_engine.zig").Button;
const GameEngineClient = core.game_engine_client.GameEngineClient;
const Species = core.protocol.Species;
const Floor = core.protocol.Floor;
const Wall = core.protocol.Wall;
const Response = core.protocol.Response;
const Event = core.protocol.Event;
const PerceivedHappening = core.protocol.PerceivedHappening;
const PerceivedFrame = core.protocol.PerceivedFrame;
const PerceivedThing = core.protocol.PerceivedThing;
const allocator = std.heap.c_allocator;
const getHeadPosition = core.game_logic.getHeadPosition;
const logical_window_size = sdl.makeRect(Rect{ .x = 0, .y = 0, .width = 712, .height = 512 });
/// changes when the window resizes
/// FIXME: should initialize to logical_window_size, but workaround https://github.com/ziglang/zig/issues/2855
var output_rect = sdl.makeRect(Rect{
.x = logical_window_size.x,
.y = logical_window_size.y,
.width = logical_window_size.w,
.height = logical_window_size.h,
});
pub fn main() anyerror!void {
core.debug.init();
core.debug.nameThisThread("gui");
defer core.debug.unnameThisThread();
core.debug.thread_lifecycle.print("init", .{});
defer core.debug.thread_lifecycle.print("shutdown", .{});
// SDL handling SIGINT blocks propagation to child threads.
if (!(sdl.c.SDL_SetHintWithPriority(sdl.c.SDL_HINT_NO_SIGNAL_HANDLERS, "1", sdl.c.SDL_HintPriority.SDL_HINT_OVERRIDE) != sdl.c.SDL_bool.SDL_FALSE)) {
std.debug.panic("failed to disable sdl signal handlers\n", .{});
}
if (sdl.c.SDL_Init(sdl.c.SDL_INIT_VIDEO) != 0) {
std.debug.panic("SDL_Init failed: {c}\n", .{sdl.c.SDL_GetError()});
}
defer sdl.c.SDL_Quit();
const screen = sdl.c.SDL_CreateWindow(
"Legend of Swarkland",
sdl.SDL_WINDOWPOS_UNDEFINED,
sdl.SDL_WINDOWPOS_UNDEFINED,
logical_window_size.w,
logical_window_size.h,
sdl.c.SDL_WINDOW_RESIZABLE,
) orelse {
std.debug.panic("SDL_CreateWindow failed: {c}\n", .{sdl.c.SDL_GetError()});
};
defer sdl.c.SDL_DestroyWindow(screen);
const renderer: *sdl.Renderer = sdl.c.SDL_CreateRenderer(screen, -1, 0) orelse {
std.debug.panic("SDL_CreateRenderer failed: {c}\n", .{sdl.c.SDL_GetError()});
};
defer sdl.c.SDL_DestroyRenderer(renderer);
{
var renderer_info: sdl.c.SDL_RendererInfo = undefined;
sdl.assertZero(sdl.c.SDL_GetRendererInfo(renderer, &renderer_info));
if (renderer_info.flags & @bitCast(u32, sdl.c.SDL_RENDERER_TARGETTEXTURE) == 0) {
std.debug.panic("rendering to a temporary texture is not supported", .{});
}
}
const screen_buffer: *sdl.Texture = sdl.c.SDL_CreateTexture(
renderer,
sdl.c.SDL_PIXELFORMAT_ABGR8888,
sdl.c.SDL_TEXTUREACCESS_TARGET,
logical_window_size.w,
logical_window_size.h,
) orelse {
std.debug.panic("SDL_CreateTexture failed: {c}\n", .{sdl.c.SDL_GetError()});
};
defer sdl.c.SDL_DestroyTexture(screen_buffer);
textures.init(renderer);
defer textures.deinit();
try doMainLoop(renderer, screen_buffer);
}
fn doMainLoop(renderer: *sdl.Renderer, screen_buffer: *sdl.Texture) !void {
const aesthetic_seed = 0xbee894fc;
var input_engine = InputEngine.init();
var inputs_considered_harmful = true;
const InputPrompt = enum {
none, // TODO: https://github.com/ziglang/zig/issues/1332 and use null instead of this.
attack,
kick,
};
const GameState = union(enum) {
main_menu: gui.LinearMenuState,
running: Running,
const Running = struct {
client: GameEngineClient,
client_state: ?PerceivedFrame = null,
input_prompt: InputPrompt = .none,
animations: ?Animations = null,
/// only tracked to display aesthetics consistently through movement.
total_journey_offset: Coord = Coord{ .x = 0, .y = 0 },
// tutorial state should *not* reset through undo.
/// 0, 1, or 2, where 2 means "2 or more"
kicks_performed: u2 = 0,
observed_kangaroo_death: bool = false,
};
};
var game_state = GameState{ .main_menu = gui.LinearMenuState.init() };
defer switch (game_state) {
GameState.running => |*state| state.client.stopEngine(),
else => {},
};
while (true) {
// TODO: use better source of time (that doesn't crash after running for a month)
const now = @intCast(i32, sdl.c.SDL_GetTicks());
switch (game_state) {
GameState.main_menu => |*main_menu_state| {
main_menu_state.beginFrame();
},
GameState.running => |*state| {
while (state.client.queues.takeResponse()) |response| {
switch (response) {
.stuff_happens => |happening| {
// Show animations for what's going on.
state.animations = try loadAnimations(happening.frames, now);
state.client_state = happening.frames[happening.frames.len - 1];
state.total_journey_offset = state.total_journey_offset.plus(state.animations.?.frame_index_to_aesthetic_offset[happening.frames.len - 1]);
for (happening.frames) |frame| {
for (frame.others) |other| {
if (other.activity == .death and other.species == .kangaroo) {
state.observed_kangaroo_death = true;
}
}
if (frame.self.activity == .kick) {
if (state.kicks_performed < 2) state.kicks_performed += 1;
}
}
},
.load_state => |frame| {
state.animations = null;
state.client_state = frame;
},
.reject_request => {
// oh sorry.
},
}
}
},
}
var event: sdl.c.SDL_Event = undefined;
while (sdl.SDL_PollEvent(&event) != 0) {
switch (event.@"type") {
sdl.c.SDL_QUIT => {
core.debug.thread_lifecycle.print("sdl quit", .{});
return;
},
sdl.c.SDL_WINDOWEVENT => {
switch (event.window.event) {
sdl.c.SDL_WINDOWEVENT_FOCUS_GAINED => {
inputs_considered_harmful = true;
},
else => {},
}
},
sdl.c.SDL_KEYDOWN, sdl.c.SDL_KEYUP => {
if (input_engine.handleEvent(event)) |button| {
if (inputs_considered_harmful) {
// when we first get focus, SDL gives a friendly digest of all the buttons that are already held down.
// these are not inputs for us.
continue;
}
switch (game_state) {
GameState.main_menu => |*main_menu_state| {
switch (button) {
Button.up => {
main_menu_state.moveUp();
},
Button.down => {
main_menu_state.moveDown();
},
Button.enter => {
main_menu_state.enter();
},
else => {},
}
},
GameState.running => |*state| {
switch (button) {
.left => {
switch (state.input_prompt) {
.none => {
try state.client.move(makeCoord(-1, 0));
},
.attack => {
try state.client.attack(makeCoord(-1, 0));
},
.kick => {
try state.client.kick(makeCoord(-1, 0));
},
}
state.input_prompt = .none;
},
.right => {
switch (state.input_prompt) {
.none => {
try state.client.move(makeCoord(1, 0));
},
.attack => {
try state.client.attack(makeCoord(1, 0));
},
.kick => {
try state.client.kick(makeCoord(1, 0));
},
}
state.input_prompt = .none;
},
.up => {
switch (state.input_prompt) {
.none => {
try state.client.move(makeCoord(0, -1));
},
.attack => {
try state.client.attack(makeCoord(0, -1));
},
.kick => {
try state.client.kick(makeCoord(0, -1));
},
}
state.input_prompt = .none;
},
.down => {
switch (state.input_prompt) {
.none => {
try state.client.move(makeCoord(0, 1));
},
.attack => {
try state.client.attack(makeCoord(0, 1));
},
.kick => {
try state.client.kick(makeCoord(0, 1));
},
}
state.input_prompt = .none;
},
.start_attack => {
state.input_prompt = .attack;
},
.start_kick => {
state.input_prompt = .kick;
},
.backspace => {
if (state.input_prompt != .none) {
state.input_prompt = .none;
} else {
try state.client.rewind();
}
},
.escape => {
state.input_prompt = .none;
},
.restart => {
state.client.stopEngine();
game_state = GameState{ .main_menu = gui.LinearMenuState.init() };
},
.beat_level => {
try state.client.beatLevelMacro();
},
else => {},
}
},
}
}
},
else => {},
}
}
sdl.assertZero(sdl.c.SDL_SetRenderTarget(renderer, screen_buffer));
sdl.assertZero(sdl.c.SDL_RenderClear(renderer));
switch (game_state) {
GameState.main_menu => |*main_menu_state| {
var menu_renderer = gui.Gui.init(renderer, main_menu_state, textures.sprites.human);
menu_renderer.seek(10, 10);
menu_renderer.scale(2);
menu_renderer.bold(true);
menu_renderer.marginBottom(5);
menu_renderer.text("Legend of Swarkland");
menu_renderer.scale(1);
menu_renderer.bold(false);
menu_renderer.seekRelative(70, 30);
if (menu_renderer.button(" ")) {
game_state = GameState{
.running = GameState.Running{
.client = undefined,
},
};
try game_state.running.client.startAsThread();
}
menu_renderer.seekRelative(-70, 50);
menu_renderer.text("Controls:");
menu_renderer.text(" Arrow keys: Move");
menu_renderer.text(" F: Start attack");
menu_renderer.text(" Arrow keys: Attack in direction");
menu_renderer.text(" Backspace: Undo");
menu_renderer.text(" Ctrl+R: Quit to this menu");
menu_renderer.text(" Enter: Start Game");
menu_renderer.text(" ");
menu_renderer.text(" ");
menu_renderer.text("version: " ++ textures.version_string);
},
GameState.running => |*state| blk: {
if (state.client_state == null) break :blk;
const move_frame_time = 300;
// at what point in which frame should we render?
var frame = state.client_state.?;
var progress: i32 = 0;
var display_any_input_prompt = true;
var animated_aesthetic_offset = makeCoord(0, 0);
if (state.animations) |animations| {
const animation_time = @bitCast(u32, now -% animations.start_time);
const movement_phase = @divFloor(animation_time, move_frame_time);
if (movement_phase < animations.frames.len) {
// animating
frame = animations.frames[movement_phase];
progress = @intCast(i32, animation_time - movement_phase * move_frame_time);
display_any_input_prompt = false;
animated_aesthetic_offset = animations.frame_index_to_aesthetic_offset[movement_phase].minus(animations.frame_index_to_aesthetic_offset[animations.frame_index_to_aesthetic_offset.len - 1]);
} else {
// stale
state.animations = null;
}
}
const center_screen = makeCoord(7, 7).scaled(32).plus(makeCoord(32 / 2, 32 / 2));
const camera_offset = center_screen.minus(getRelDisplayPosition(progress, move_frame_time, frame.self));
// render terrain
{
const terrain_offset = frame.terrain.rel_position.scaled(32).plus(camera_offset);
const terrain = frame.terrain.matrix;
var cursor = makeCoord(undefined, 0);
while (cursor.y <= @as(i32, terrain.height)) : (cursor.y += 1) {
cursor.x = 0;
while (cursor.x <= @as(i32, terrain.width)) : (cursor.x += 1) {
if (terrain.getCoord(cursor)) |cell| {
const display_position = cursor.scaled(32).plus(terrain_offset);
const aesthetic_coord = cursor.plus(state.total_journey_offset).plus(animated_aesthetic_offset);
const floor_texture = switch (cell.floor) {
Floor.unknown => textures.sprites.unknown_floor,
Floor.dirt => selectAesthetic(textures.sprites.dirt_floor[0..], aesthetic_seed, aesthetic_coord),
Floor.marble => selectAesthetic(textures.sprites.marble_floor[0..], aesthetic_seed, aesthetic_coord),
Floor.lava => selectAesthetic(textures.sprites.lava[0..], aesthetic_seed, aesthetic_coord),
Floor.hatch => textures.sprites.hatch,
Floor.stairs_down => textures.sprites.stairs_down,
};
textures.renderSprite(renderer, floor_texture, display_position);
const wall_texture = switch (cell.wall) {
Wall.unknown => textures.sprites.unknown_wall,
Wall.air => continue,
Wall.dirt => selectAesthetic(textures.sprites.brown_brick[0..], aesthetic_seed, aesthetic_coord),
Wall.stone => selectAesthetic(textures.sprites.gray_brick[0..], aesthetic_seed, aesthetic_coord),
Wall.centaur_transformer => textures.sprites.polymorph_trap,
};
textures.renderSprite(renderer, wall_texture, display_position);
}
}
}
}
// render the things
for (frame.others) |other| {
_ = renderThing(renderer, progress, move_frame_time, camera_offset, other);
}
const display_position = renderThing(renderer, progress, move_frame_time, camera_offset, frame.self);
// render input prompt
if (display_any_input_prompt) {
switch (state.input_prompt) {
.none => {},
.attack => {
textures.renderSprite(renderer, textures.sprites.dagger, display_position);
},
.kick => {
textures.renderSprite(renderer, textures.sprites.kick, display_position);
},
}
}
// render activity effects
for (frame.others) |other| {
renderActivity(renderer, progress, move_frame_time, camera_offset, other);
}
renderActivity(renderer, progress, move_frame_time, camera_offset, frame.self);
// sidebar
{
const AnatomySprites = struct {
diagram: Rect,
leg_wound: Rect,
limping: Rect,
};
const anatomy_sprites = switch (core.game_logic.getAnatomy(frame.self.species)) {
.humanoid => AnatomySprites{
.diagram = textures.large_sprites.humanoid,
.leg_wound = textures.large_sprites.humanoid_leg_wound,
.limping = textures.large_sprites.humanoid_limping,
},
.centauroid => AnatomySprites{
.diagram = textures.large_sprites.centauroid,
.leg_wound = textures.large_sprites.centauroid_leg_wound,
.limping = textures.large_sprites.centauroid_limping,
},
else => {
std.debug.panic("TODO\n", .{});
},
};
const anatomy_coord = makeCoord(512, 0);
textures.renderLargeSprite(renderer, anatomy_sprites.diagram, anatomy_coord);
if (frame.self.has_shield) {
textures.renderLargeSprite(renderer, textures.large_sprites.humanoid_shieled, anatomy_coord);
}
// explicit integer here to provide a compile error when new items get added.
var status_conditions: u2 = frame.self.status_conditions;
if (0 != status_conditions & core.protocol.StatusCondition_wounded_leg) {
textures.renderLargeSprite(renderer, anatomy_sprites.leg_wound, anatomy_coord);
}
if (0 != status_conditions & core.protocol.StatusCondition_limping) {
textures.renderLargeSprite(renderer, anatomy_sprites.limping, anatomy_coord);
}
}
// tutorials
var dealloc_buffer: ?[]u8 = null;
var maybe_tutorial_text: ?[]const u8 = null;
if (frame.self.activity == .death) {
maybe_tutorial_text = "you died. use Backspace to undo.";
} else if (frame.winning_score) |score| {
if (score == 1) {
maybe_tutorial_text = "you are win. use Ctrl+R to quit.";
} else {
dealloc_buffer = try std.fmt.allocPrint(allocator, "team {} wins with {} points. Ctrl+R to quit.", .{
@tagName(frame.self.species),
score,
});
maybe_tutorial_text = dealloc_buffer.?;
}
} else if (state.observed_kangaroo_death and state.kicks_performed < 2) {
maybe_tutorial_text = "You learned to kick! Use K+Arrows.";
}
if (maybe_tutorial_text) |tutorial_text| {
// gentle up/down bob
var animated_y: i32 = @divFloor(@mod(now, 2000), 100);
if (animated_y > 10) animated_y = 20 - animated_y;
const coord = makeCoord(512 / 2 - 384 / 2, 512 - 32 + animated_y);
const size = textures.renderTextScaled(renderer, tutorial_text, coord, true, 1);
}
if (dealloc_buffer) |buf| {
allocator.free(buf);
}
},
}
{
sdl.assertZero(sdl.c.SDL_SetRenderTarget(renderer, null));
sdl.assertZero(sdl.c.SDL_GetRendererOutputSize(renderer, &output_rect.w, &output_rect.h));
// preserve aspect ratio
const source_aspect_ratio = comptime @intToFloat(f32, logical_window_size.w) / @intToFloat(f32, logical_window_size.h);
const dest_aspect_ratio = @intToFloat(f32, output_rect.w) / @intToFloat(f32, output_rect.h);
if (source_aspect_ratio > dest_aspect_ratio) {
// use width
const new_height = @floatToInt(c_int, @intToFloat(f32, output_rect.w) / source_aspect_ratio);
output_rect.x = 0;
output_rect.y = @divTrunc(output_rect.h - new_height, 2);
output_rect.h = new_height;
} else {
// use height
const new_width = @floatToInt(c_int, @intToFloat(f32, output_rect.h) * source_aspect_ratio);
output_rect.x = @divTrunc(output_rect.w - new_width, 2);
output_rect.y = 0;
output_rect.w = new_width;
}
sdl.assertZero(sdl.c.SDL_RenderClear(renderer));
sdl.assertZero(sdl.c.SDL_RenderCopy(renderer, screen_buffer, &logical_window_size, &output_rect));
}
sdl.c.SDL_RenderPresent(renderer);
// delay until the next multiple of 17 milliseconds (roughly 60 FPS)
const delay_millis = 17 - (sdl.c.SDL_GetTicks() % 17);
sdl.c.SDL_Delay(delay_millis);
inputs_considered_harmful = false;
}
}
fn getRelDisplayPosition(progress: i32, progress_denominator: i32, thing: PerceivedThing) Coord {
const rel_position = getHeadPosition(thing.rel_position);
switch (thing.activity) {
.movement => |move_delta| {
if (progress < @divFloor(progress_denominator, 2)) {
// in the first half, speed up toward the halfway point.
return core.geometry.bezier3(
rel_position.scaled(32),
rel_position.scaled(32),
rel_position.scaled(32).plus(move_delta.scaled(32 / 2)),
progress,
@divFloor(progress_denominator, 2),
);
} else {
// in the second half, slow down from the halfway point.
return core.geometry.bezier3(
rel_position.scaled(32).plus(move_delta.scaled(32 / 2)),
rel_position.scaled(32).plus(move_delta.scaled(32)),
rel_position.scaled(32).plus(move_delta.scaled(32)),
progress - @divFloor(progress_denominator, 2),
@divFloor(progress_denominator, 2),
);
}
},
.failed_movement => |move_delta| {
if (progress < @divFloor(progress_denominator, 2)) {
// in the first half, speed up toward the halfway point of the would-be movement.
return core.geometry.bezier3(
rel_position.scaled(32),
rel_position.scaled(32),
rel_position.scaled(32).plus(move_delta.scaled(32 / 2)),
progress,
@divFloor(progress_denominator, 2),
);
} else {
// in the second half, abruptly reverse course and do the opposite of the above.
return core.geometry.bezier3(
rel_position.scaled(32).plus(move_delta.scaled(32 / 2)),
rel_position.scaled(32),
rel_position.scaled(32),
progress - @divFloor(progress_denominator, 2),
@divFloor(progress_denominator, 2),
);
}
},
else => return rel_position.scaled(32),
}
}
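// A minimal sanity-check sketch, not from the original file. It assumes
// core.geometry.bezier3 is an integer quadratic Bezier evaluated at
// progress/progress_denominator, which would make it interpolate exactly at
// its endpoints (the property the easing code above relies on).
test "bezier3 interpolates its endpoints" {
const p0 = makeCoord(0, 0);
const control = makeCoord(16, 0);
const p2 = makeCoord(32, 0);
try std.testing.expectEqual(p0, core.geometry.bezier3(p0, control, p2, 0, 300));
try std.testing.expectEqual(p2, core.geometry.bezier3(p0, control, p2, 300, 300));
}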
fn renderThing(renderer: *sdl.Renderer, progress: i32, progress_denominator: i32, camera_offset: Coord, thing: PerceivedThing) Coord {
// compute position
const rel_display_position = getRelDisplayPosition(progress, progress_denominator, thing);
const display_position = rel_display_position.plus(camera_offset);
// render main sprite
switch (thing.rel_position) {
.small => {
textures.renderSprite(renderer, speciesToSprite(thing.species), display_position);
},
.large => |coords| {
const oriented_delta = coords[1].minus(coords[0]);
const tail_display_position = display_position.plus(oriented_delta.scaled(32));
const rhino_sprite_normalizing_rotation = 0;
const rotation = directionToRotation(oriented_delta) +% rhino_sprite_normalizing_rotation;
textures.renderSpriteRotated(renderer, speciesToSprite(thing.species), display_position, rotation);
textures.renderSpriteRotated(renderer, speciesToTailSprite(thing.species), tail_display_position, rotation);
},
}
// render status effects
// explicit u2 here mirrors the sidebar code above: it provides a compile error when new status conditions get added.
var status_conditions: u2 = thing.status_conditions;
if (status_conditions & core.protocol.StatusCondition_wounded_leg != 0) {
textures.renderSprite(renderer, textures.sprites.wounded, display_position);
}
if (status_conditions & core.protocol.StatusCondition_limping != 0) {
textures.renderSprite(renderer, textures.sprites.limping, display_position);
}
if (thing.has_shield) {
textures.renderSprite(renderer, textures.sprites.equipment, display_position);
}
return display_position;
}
fn renderActivity(renderer: *sdl.Renderer, progress: i32, progress_denominator: i32, camera_offset: Coord, thing: PerceivedThing) void {
const rel_display_position = getRelDisplayPosition(progress, progress_denominator, thing);
const display_position = rel_display_position.plus(camera_offset);
switch (thing.activity) {
.none => {},
.movement => {},
.failed_movement => {},
.attack => |data| {
const max_range = core.game_logic.getAttackRange(thing.species);
if (max_range == 1) {
const dagger_sprite_normalizing_rotation = 1;
textures.renderSpriteRotated(
renderer,
textures.sprites.dagger,
display_position.plus(data.direction.scaled(32 * 3 / 4)),
directionToRotation(data.direction) +% dagger_sprite_normalizing_rotation,
);
} else {
// The animated arrow speed is determined by the max range,
// but the animation is cut short if the arrow hits something.
var clamped_progress = progress * max_range;
if (clamped_progress > data.distance * progress_denominator) {
clamped_progress = data.distance * progress_denominator;
}
const arrow_sprite_normalizing_rotation = 4;
textures.renderSpriteRotated(
renderer,
textures.sprites.arrow,
core.geometry.bezier2(
display_position,
display_position.plus(data.direction.scaled(32 * max_range)),
clamped_progress,
progress_denominator * max_range,
),
directionToRotation(data.direction) +% arrow_sprite_normalizing_rotation,
);
}
},
.kick => |coord| {
const kick_sprite_normalizing_rotation = 6;
textures.renderSpriteRotated(
renderer,
textures.sprites.kick,
display_position.plus(coord.scaled(32 * 1 / 2)),
directionToRotation(coord) +% kick_sprite_normalizing_rotation,
);
},
.polymorph => {
const sprites = textures.sprites.polymorph_effect[4..];
const sprite_index = @divTrunc(progress * @intCast(i32, sprites.len), progress_denominator);
textures.renderSprite(renderer, sprites[@intCast(usize, sprite_index)], display_position);
},
.death => {
textures.renderSprite(renderer, textures.sprites.death, display_position);
},
}
}
fn selectAesthetic(array: []const Rect, seed: u32, coord: Coord) Rect {
var hash = seed;
hash ^= @bitCast(u32, coord.x);
hash = hashU32(hash);
hash ^= @bitCast(u32, coord.y);
hash = hashU32(hash);
return array[@intCast(usize, std.rand.limitRangeBiased(u32, hash, @intCast(u32, array.len)))];
}
fn hashU32(input: u32) u32 {
// https://nullprogram.com/blog/2018/07/31/
var x = input;
x ^= x >> 17;
x *%= 0xed5ad4bb;
x ^= x >> 11;
x *%= 0xac4c1b51;
x ^= x >> 15;
x *%= 0x31848bab;
x ^= x >> 14;
return x;
}
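// A small illustrative test, not part of the original file. Each step of
// hashU32 (xor-shift, multiply by an odd constant) is invertible mod 2^32, so
// the whole function is a bijection: equal inputs hash equal and distinct
// inputs hash distinct. That determinism is what keeps selectAesthetic stable
// for a given seed and coordinate across frames.
test "hashU32 is a deterministic bijection" {
try std.testing.expectEqual(hashU32(42), hashU32(42));
try std.testing.expect(hashU32(1) != hashU32(2));
}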
fn speciesToSprite(species: Species) Rect {
return switch (species) {
.human => textures.sprites.human,
.orc => textures.sprites.orc,
.centaur => textures.sprites.centaur_archer,
.turtle => textures.sprites.turtle,
.rhino => textures.sprites.rhino[0],
.kangaroo => textures.sprites.kangaroo,
};
}
fn speciesToTailSprite(species: Species) Rect {
return switch (species) {
.rhino => textures.sprites.rhino[1],
else => unreachable,
};
}
const Animations = struct {
start_time: i32,
frames: []PerceivedFrame,
frame_index_to_aesthetic_offset: []Coord,
};
const AttackAnimation = struct {};
const DeathAnimation = struct {};
fn loadAnimations(frames: []PerceivedFrame, now: i32) !Animations {
var frame_index_to_aesthetic_offset = try allocator.alloc(Coord, frames.len);
var current_offset = makeCoord(0, 0);
for (frames) |frame, i| {
frame_index_to_aesthetic_offset[i] = current_offset;
switch (frame.self.activity) {
.movement => |move_delta| {
current_offset = current_offset.plus(move_delta);
},
else => {},
}
}
return Animations{
.start_time = now,
.frames = try core.protocol.deepClone(allocator, frames),
.frame_index_to_aesthetic_offset = frame_index_to_aesthetic_offset,
};
}
// repo_path: src/gui/gui_main.zig
const std = @import("std");
const mem = std.mem;
const Allocator = mem.Allocator;
const testing = std.testing;
const builtin = @import("builtin");
const assert = std.debug.assert;
const is_debug = builtin.mode == .Debug;
/// An entity ID uniquely identifies an entity globally within an Entities set.
pub const EntityID = u64;
const TypeId = enum(usize) { _ };
// typeId implementation by Felix "xq" Queißner
fn typeId(comptime T: type) TypeId {
_ = T;
return @intToEnum(TypeId, @ptrToInt(&struct {
var x: u8 = 0;
}.x));
}
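// Illustrative test, not in the original file: the comptime-generated inner
// struct is unique per T, so its address is a stable, distinct identity.
test "typeId yields stable, distinct ids" {
try testing.expectEqual(typeId(u32), typeId(u32));
try testing.expect(typeId(u32) != typeId(u64));
}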
const Column = struct {
name: []const u8,
typeId: TypeId,
size: u32,
alignment: u16,
offset: usize,
};
// Orders columns by alignment, breaking ties by name. The tie-break must only
// apply when alignments are equal, otherwise the ordering is not transitive
// and std.sort.sort may misbehave.
fn by_alignment_name(context: void, lhs: Column, rhs: Column) bool {
_ = context;
if (lhs.alignment != rhs.alignment) return lhs.alignment < rhs.alignment;
return std.mem.lessThan(u8, lhs.name, rhs.name);
}
/// Represents a single archetype, that is, entities which have the same exact set of component
/// types. When a component is added to or removed from an entity, its archetype changes.
///
/// Database equivalent: a table where rows are entities and columns are components (dense storage).
pub const ArchetypeStorage = struct {
allocator: Allocator,
/// The hash of every component name in this archetype, i.e. the name of this archetype.
hash: u64,
/// The length of the table (used number of rows.)
len: u32,
/// The capacity of the table (allocated number of rows.)
capacity: u32,
/// Describes the columns stored in the `block` of memory, sorted by the smallest alignment
/// value.
columns: []Column,
/// The block of memory where all entities of this archetype are actually stored. This memory is
/// laid out as contiguous column values (i.e. the same way MultiArrayList works, SoA style)
/// so `[col1_val1, col1_val2, col2_val1, col2_val2, ...]`. The number of rows is always
/// identical (the `ArchetypeStorage.capacity`), and an "id" column is always present (the
/// entity IDs stored in the table.) The value names, size, and alignments are described by the
/// `ArchetypeStorage.columns` slice.
///
/// When necessary, padding is added between the column value *arrays* in order to achieve
/// alignment.
block: []u8,
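// Illustrative layout (an assumed example, not from the original: capacity 2,
// columns sorted by alignment as a u8 "flag" column then a u64 "id" column):
//
//   [flag_row0, flag_row1, <6 bytes padding to reach 8-byte alignment>, id_row0..., id_row1...]
//
// Each column's `offset` field records where its value array begins in `block`.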
/// Calculates the storage.hash value. This is a hash of all the component names, and can
/// effectively be used to uniquely identify this table within the database.
pub fn calculateHash(storage: *ArchetypeStorage) void {
storage.hash = 0;
for (storage.columns) |column| {
storage.hash ^= std.hash_map.hashString(column.name);
}
}
pub fn deinit(storage: *ArchetypeStorage, gpa: Allocator) void {
gpa.free(storage.columns);
}
fn debugValidateRow(storage: *ArchetypeStorage, gpa: Allocator, row: anytype) void {
inline for (std.meta.fields(@TypeOf(row))) |field, index| {
const column = storage.columns[index];
if (typeId(field.field_type) != column.typeId) {
const msg = std.mem.concat(gpa, u8, &.{
"unexpected type: ",
@typeName(field.field_type),
" expected: ",
column.name,
}) catch |err| @panic(@errorName(err));
@panic(msg);
}
}
}
/// appends a new row to this table, with all undefined values.
pub fn appendUndefined(storage: *ArchetypeStorage, gpa: Allocator) !u32 {
try storage.ensureUnusedCapacity(gpa, 1);
assert(storage.len < storage.capacity);
const row_index = storage.len;
storage.len += 1;
return row_index;
}
/// Appends a new row with the given values to this table and returns its row index.
pub fn append(storage: *ArchetypeStorage, gpa: Allocator, row: anytype) !u32 {
if (is_debug) storage.debugValidateRow(gpa, row);
try storage.ensureUnusedCapacity(gpa, 1);
assert(storage.len < storage.capacity);
const row_index = storage.len;
storage.len += 1;
storage.setRow(gpa, row_index, row);
return row_index;
}
pub fn undoAppend(storage: *ArchetypeStorage) void {
storage.len -= 1;
}
/// Ensures there is enough unused capacity to store `num_rows`.
pub fn ensureUnusedCapacity(storage: *ArchetypeStorage, gpa: Allocator, num_rows: usize) !void {
return storage.ensureTotalCapacity(gpa, storage.len + num_rows);
}
/// Ensures the total capacity is enough to store `new_capacity` rows total.
pub fn ensureTotalCapacity(storage: *ArchetypeStorage, gpa: Allocator, new_capacity: usize) !void {
var better_capacity = storage.capacity;
if (better_capacity >= new_capacity) return;
while (true) {
better_capacity += better_capacity / 2 + 8;
if (better_capacity >= new_capacity) break;
}
return storage.setCapacity(gpa, better_capacity);
}
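// Growth sketch: starting from capacity 0, the loop above yields 8, 20, 38,
// 65, ... i.e. roughly 1.5x growth per step plus a constant, which amortizes
// reallocation cost as tables grow.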
/// Sets the capacity to exactly `new_capacity` rows total
///
/// Asserts `new_capacity >= storage.len`, if you want to shrink capacity then change the len
/// yourself first.
pub fn setCapacity(storage: *ArchetypeStorage, gpa: Allocator, new_capacity: usize) !void {
assert(storage.capacity >= storage.len);
// TODO: ensure columns are sorted by alignment
var new_capacity_bytes: usize = 0;
for (storage.columns) |*column| {
const max_padding = column.alignment - 1;
new_capacity_bytes += max_padding;
new_capacity_bytes += new_capacity * column.size;
}
const new_block = try gpa.alloc(u8, new_capacity_bytes);
var offset: usize = 0;
for (storage.columns) |*column| {
const addr = @ptrToInt(&new_block[offset]);
const aligned_addr = std.mem.alignForward(addr, column.alignment);
const padding = aligned_addr - addr;
offset += padding;
if (storage.capacity > 0) {
const slice = storage.block[column.offset .. column.offset + storage.capacity * column.size];
mem.copy(u8, new_block[offset..], slice);
}
column.offset = offset;
offset += new_capacity * column.size;
}
// Free the old block now that its column data has been copied into new_block.
if (storage.capacity > 0) gpa.free(storage.block);
storage.block = new_block;
storage.capacity = @intCast(u32, new_capacity);
}
/// Sets the entire row's values in the table.
pub fn setRow(storage: *ArchetypeStorage, gpa: Allocator, row_index: u32, row: anytype) void {
if (is_debug) storage.debugValidateRow(gpa, row);
const fields = std.meta.fields(@TypeOf(row));
inline for (fields) |field, index| {
const ColumnType = field.field_type;
const column = storage.columns[index];
const columnValues = @ptrCast([*]ColumnType, @alignCast(@alignOf(ColumnType), &storage.block[column.offset]));
columnValues[row_index] = @field(row, field.name);
}
}
/// Sets the value of the named components (columns) for the given row in the table.
pub fn set(storage: *ArchetypeStorage, gpa: Allocator, row_index: u32, name: []const u8, component: anytype) void {
const ColumnType = @TypeOf(component);
for (storage.columns) |column| {
if (!std.mem.eql(u8, column.name, name)) continue;
if (is_debug) {
if (typeId(ColumnType) != column.typeId) {
const msg = std.mem.concat(gpa, u8, &.{
"unexpected type: ",
@typeName(ColumnType),
" expected: ",
column.name,
}) catch |err| @panic(@errorName(err));
@panic(msg);
}
}
const columnValues = @ptrCast([*]ColumnType, @alignCast(@alignOf(ColumnType), &storage.block[column.offset]));
columnValues[row_index] = component;
return;
}
@panic("no such component");
}
pub fn get(storage: *ArchetypeStorage, gpa: Allocator, row_index: u32, name: []const u8, comptime ColumnType: type) ?ColumnType {
for (storage.columns) |column| {
if (!std.mem.eql(u8, column.name, name)) continue;
if (is_debug) {
if (typeId(ColumnType) != column.typeId) {
const msg = std.mem.concat(gpa, u8, &.{
"unexpected type: ",
@typeName(ColumnType),
" expected: ",
column.name,
}) catch |err| @panic(@errorName(err));
@panic(msg);
}
}
const columnValues = @ptrCast([*]ColumnType, @alignCast(@alignOf(ColumnType), &storage.block[column.offset]));
return columnValues[row_index];
}
return null;
}
pub fn getRaw(storage: *ArchetypeStorage, row_index: u32, name: []const u8) []u8 {
for (storage.columns) |column| {
if (!std.mem.eql(u8, column.name, name)) continue;
const start = column.offset + (column.size * row_index);
return storage.block[start .. start + (column.size)];
}
@panic("no such component");
}
pub fn setRaw(storage: *ArchetypeStorage, row_index: u32, column: Column, component: []u8) !void {
if (is_debug) {
const ok = blk: {
for (storage.columns) |col| {
if (std.mem.eql(u8, col.name, column.name)) {
break :blk true;
}
}
break :blk false;
};
if (!ok) @panic("setRaw with non-matching column");
}
mem.copy(u8, storage.block[column.offset + (row_index * column.size) ..], component);
}
/// Swap-removes the specified row with the last row in the table.
pub fn remove(storage: *ArchetypeStorage, row_index: u32) void {
if (storage.len > 1) {
for (storage.columns) |column| {
const dstStart = column.offset + (column.size * row_index);
const dst = storage.block[dstStart .. dstStart + (column.size)];
const srcStart = column.offset + (column.size * (storage.len - 1));
const src = storage.block[srcStart .. srcStart + (column.size)];
std.mem.copy(u8, dst, src);
}
}
storage.len -= 1;
}
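// Worked example: with rows [A, B, C] (len == 3), remove(1) copies row C's
// bytes over row B's for every column and decrements len, leaving [A, C].
// Callers must then re-point C's entity at row index 1 (see Entities.remove).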
/// Tells if this archetype has every one of the given components.
pub fn hasComponents(storage: *ArchetypeStorage, components: []const []const u8) bool {
for (components) |component_name| {
if (!storage.hasComponent(component_name)) return false;
}
return true;
}
/// Tells if this archetype has a component with the specified name.
pub fn hasComponent(storage: *ArchetypeStorage, component: []const u8) bool {
for (storage.columns) |column| {
if (std.mem.eql(u8, column.name, component)) return true;
}
return false;
}
};
pub const void_archetype_hash = std.math.maxInt(u64);
/// A database of entities. For example, all player, monster, etc. entities in a game world.
///
/// ```
/// const world = Entities.init(allocator); // all entities in our world.
/// defer world.deinit();
///
/// const player1 = world.new(); // our first "player" entity
/// const player2 = world.new(); // our second "player" entity
/// ```
///
/// Entities are divided into archetypes for optimal, CPU cache efficient storage. For example, all
/// entities with two components `Location` and `Name` are stored in the same table dedicated to
/// densely storing `(Location, Name)` rows in contiguous memory. This not only ensures CPU cache
/// efficiency (leveraging data oriented design) which improves iteration speed over entities for
/// example, but makes queries like "find all entities with a Location component" ridiculously fast
/// because one need only find the tables which have a column for storing Location components and it
/// is then guaranteed every entity in the table has that component (entities do not need to be
/// checked one by one to determine if they have a Location component.)
///
/// Components can be added and removed to entities at runtime as you please:
///
/// ```
/// try player1.set("rotation", Rotation{ .degrees = 90 });
/// try player1.remove("rotation");
/// ```
///
/// When getting a component value, you must know its type or undefined behavior will occur:
/// TODO: improve this!
///
/// ```
/// if (player1.get("rotation", Rotation)) |rotation| {
/// // player1 had a rotation component!
/// }
/// ```
///
/// When a component is added or removed from an entity, its archetype is said to change. For
/// example player1 may have had the archetype `(Location, Name)` before, and after adding the
/// rotation component has the archetype `(Location, Name, Rotation)`. It will be automagically
/// "moved" from the table that stores entities with `(Location, Name)` components to the table that
/// stores `(Location, Name, Rotation)` components for you.
///
/// You can have 65,535 archetypes in total, and 4,294,967,295 entities total. Entities which are
/// deleted are merely marked as "unused" and recycled.
///
/// Database equivalents:
/// * Entities is a database of tables, where each table represents a single archetype.
/// * ArchetypeStorage is a table, whose rows are entities and columns are components.
/// * EntityID is a mere 64-bit integer, mapped to a 16-bit archetype table index and 32-bit
/// row index, enabling entities to "move" from one archetype table to another seamlessly and
/// making lookup by entity ID a few cheap indexing operations.
/// * ComponentStorage(T) is a column of data within a table for a single type of component `T`.
pub const Entities = struct {
allocator: Allocator,
/// TODO!
counter: EntityID = 0,
/// A mapping of entity IDs (array indices) to where an entity's component values are actually
/// stored.
entities: std.AutoHashMapUnmanaged(EntityID, Pointer) = .{},
/// A mapping of archetype hash to their storage.
///
/// Database equivalent: table name -> tables representing entities.
archetypes: std.AutoArrayHashMapUnmanaged(u64, ArchetypeStorage) = .{},
/// Points to where an entity is stored, specifically in which archetype table and in which row
/// of that table. That is, the entity's component values are stored at:
///
/// ```
/// Entities.archetypes[ptr.archetype_index].rows[ptr.row_index]
/// ```
///
pub const Pointer = struct {
archetype_index: u16,
row_index: u32,
};
pub const Iterator = struct {
entities: *Entities,
components: []const []const u8,
archetype_index: usize = 0,
row_index: u32 = 0,
pub const Entry = struct {
entity: EntityID,
pub fn unlock(e: Entry) void {
_ = e;
}
};
pub fn next(iter: *Iterator) ?Entry {
const entities = iter.entities;
// If the archetype table we're looking at does not contain the components we're
// querying for, keep searching through tables until we find one that does.
var archetype = entities.archetypes.entries.get(iter.archetype_index).value;
while (!archetype.hasComponents(iter.components) or iter.row_index >= archetype.len) {
iter.archetype_index += 1;
iter.row_index = 0;
if (iter.archetype_index >= entities.archetypes.count()) {
return null;
}
archetype = entities.archetypes.entries.get(iter.archetype_index).value;
}
const row_entity_id = archetype.get(iter.entities.allocator, iter.row_index, "id", EntityID).?;
iter.row_index += 1;
return Entry{ .entity = row_entity_id };
}
};
pub fn query(entities: *Entities, components: []const []const u8) Iterator {
return Iterator{
.entities = entities,
.components = components,
};
}
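// Hypothetical usage sketch (the names below are illustrative, not from this file):
//
//   var iter = world.query(&.{"location"});
//   while (iter.next()) |entry| {
//       defer entry.unlock();
//       const loc = world.getComponent(entry.entity, "location", Location);
//       _ = loc;
//   }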
pub fn init(allocator: Allocator) !Entities {
var entities = Entities{ .allocator = allocator };
const columns = try allocator.alloc(Column, 1);
columns[0] = .{
.name = "id",
.typeId = typeId(EntityID),
.size = @sizeOf(EntityID),
.alignment = @alignOf(EntityID),
.offset = undefined,
};
try entities.archetypes.put(allocator, void_archetype_hash, ArchetypeStorage{
.allocator = allocator,
.len = 0,
.capacity = 0,
.columns = columns,
.block = undefined,
.hash = void_archetype_hash,
});
return entities;
}
pub fn deinit(entities: *Entities) void {
entities.entities.deinit(entities.allocator);
var iter = entities.archetypes.iterator();
while (iter.next()) |entry| {
entities.allocator.free(entry.value_ptr.block);
entry.value_ptr.deinit(entities.allocator);
}
entities.archetypes.deinit(entities.allocator);
}
/// Returns a new entity.
pub fn new(entities: *Entities) !EntityID {
const new_id = entities.counter;
entities.counter += 1;
var void_archetype = entities.archetypes.getPtr(void_archetype_hash).?;
const new_row = try void_archetype.append(entities.allocator, .{ .id = new_id });
const void_pointer = Pointer{
.archetype_index = 0, // void archetype is guaranteed to be first index
.row_index = new_row,
};
entities.entities.put(entities.allocator, new_id, void_pointer) catch |err| {
void_archetype.undoAppend();
return err;
};
return new_id;
}
/// Removes an entity.
pub fn remove(entities: *Entities, entity: EntityID) !void {
var archetype = entities.archetypeByID(entity);
const ptr = entities.entities.get(entity).?;
// A swap removal will be performed; update the entity stored in the last row of the
// archetype table to point to the row where the entity we are removing is currently located.
if (archetype.len > 1) {
const last_row_entity_id = archetype.get(entities.allocator, archetype.len - 1, "id", EntityID).?;
try entities.entities.put(entities.allocator, last_row_entity_id, Pointer{
.archetype_index = ptr.archetype_index,
.row_index = ptr.row_index,
});
}
// Perform a swap removal to remove our entity from the archetype table.
archetype.remove(ptr.row_index);
_ = entities.entities.remove(entity);
}
/// Returns the archetype storage for the given entity.
pub inline fn archetypeByID(entities: *Entities, entity: EntityID) *ArchetypeStorage {
const ptr = entities.entities.get(entity).?;
return &entities.archetypes.values()[ptr.archetype_index];
}
/// Sets the named component to the specified value for the given entity,
/// moving the entity from its current archetype table to the new archetype
/// table if required.
pub fn setComponent(entities: *Entities, entity: EntityID, comptime name: []const u8, component: anytype) !void {
var archetype = entities.archetypeByID(entity);
// Determine the old hash for the archetype.
const old_hash = archetype.hash;
// Determine the new hash for the archetype + new component
var have_already = archetype.hasComponent(name);
const new_hash = if (have_already) old_hash else old_hash ^ std.hash_map.hashString(name);
// Find the archetype storage for this entity. Could be a new archetype storage table (if a
// new component was added), or the same archetype storage table (if just updating the
// value of a component.)
var archetype_entry = try entities.archetypes.getOrPut(entities.allocator, new_hash);
if (!archetype_entry.found_existing) {
// getOrPut allocated, so the archetype we retrieved earlier may no longer be a valid
// pointer. Refresh it now:
archetype = entities.archetypeByID(entity);
const columns = entities.allocator.alloc(Column, archetype.columns.len + 1) catch |err| {
assert(entities.archetypes.swapRemove(new_hash));
return err;
};
mem.copy(Column, columns, archetype.columns);
columns[columns.len - 1] = .{
.name = name,
.typeId = typeId(@TypeOf(component)),
.size = @sizeOf(@TypeOf(component)),
.alignment = @alignOf(@TypeOf(component)),
.offset = undefined,
};
std.sort.sort(Column, columns, {}, by_alignment_name);
archetype_entry.value_ptr.* = ArchetypeStorage{
.allocator = entities.allocator,
.len = 0,
.capacity = 0,
.columns = columns,
.block = undefined,
.hash = undefined,
};
const new_archetype = archetype_entry.value_ptr;
new_archetype.calculateHash();
}
// Either new storage (if the entity moved between storage tables due to having a new
// component) or the prior storage (if the entity already had the component and its value
// is merely being updated.)
var current_archetype_storage = archetype_entry.value_ptr;
if (new_hash == old_hash) {
// Update the value of the existing component of the entity.
const ptr = entities.entities.get(entity).?;
current_archetype_storage.set(entities.allocator, ptr.row_index, name, component);
return;
}
// Copy to all component values for our entity from the old archetype storage (archetype)
// to the new one (current_archetype_storage).
const new_row = try current_archetype_storage.appendUndefined(entities.allocator);
const old_ptr = entities.entities.get(entity).?;
// Update the storage/columns for all of the existing components on the entity.
current_archetype_storage.set(entities.allocator, new_row, "id", entity);
for (archetype.columns) |column| {
if (std.mem.eql(u8, column.name, "id")) continue;
for (current_archetype_storage.columns) |corresponding| {
if (std.mem.eql(u8, column.name, corresponding.name)) {
const old_value_raw = archetype.getRaw(old_ptr.row_index, column.name);
current_archetype_storage.setRaw(new_row, corresponding, old_value_raw) catch |err| {
current_archetype_storage.undoAppend();
return err;
};
break;
}
}
}
// Update the storage/column for the new component.
current_archetype_storage.set(entities.allocator, new_row, name, component);
var swapped_entity_id = archetype.get(entities.allocator, old_ptr.row_index, "id", EntityID).?;
archetype.remove(old_ptr.row_index);
// TODO: try is wrong here and below?
try entities.entities.put(entities.allocator, swapped_entity_id, old_ptr);
try entities.entities.put(entities.allocator, entity, Pointer{
.archetype_index = @intCast(u16, archetype_entry.index),
.row_index = new_row,
});
return;
}
/// gets the named component of the given type (which must be correct, otherwise undefined
/// behavior will occur). Returns null if the component does not exist on the entity.
pub fn getComponent(entities: *Entities, entity: EntityID, name: []const u8, comptime Component: type) ?Component {
var archetype = entities.archetypeByID(entity);
const ptr = entities.entities.get(entity).?;
return archetype.get(entities.allocator, ptr.row_index, name, Component);
}
/// Removes the named component from the entity, or noop if it doesn't have such a component.
pub fn removeComponent(entities: *Entities, entity: EntityID, name: []const u8) !void {
var archetype = entities.archetypeByID(entity);
if (!archetype.hasComponent(name)) return;
// Determine the old hash for the archetype.
const old_hash = archetype.hash;
// Determine the new hash for the archetype with the component removed
var new_hash: u64 = 0;
for (archetype.columns) |column| {
if (!std.mem.eql(u8, column.name, name)) new_hash ^= std.hash_map.hashString(column.name);
}
assert(new_hash != old_hash);
// Find the archetype storage this entity will move to. Note that although an entity with
// (A, B, C) components implies archetypes ((A), (A, B), (A, B, C)) exist there is no
// guarantee that archetype (A, C) exists - and so removing a component sometimes does
// require creating a new archetype table!
var archetype_entry = try entities.archetypes.getOrPut(entities.allocator, new_hash);
if (!archetype_entry.found_existing) {
// getOrPut allocated, so the archetype we retrieved earlier may no longer be a valid
// pointer. Refresh it now:
archetype = entities.archetypeByID(entity);
const columns = entities.allocator.alloc(Column, archetype.columns.len - 1) catch |err| {
assert(entities.archetypes.swapRemove(new_hash));
return err;
};
var i: usize = 0;
for (archetype.columns) |column| {
if (std.mem.eql(u8, column.name, name)) continue;
columns[i] = column;
i += 1;
}
archetype_entry.value_ptr.* = ArchetypeStorage{
.allocator = entities.allocator,
.len = 0,
.capacity = 0,
.columns = columns,
.block = undefined,
.hash = undefined,
};
const new_archetype = archetype_entry.value_ptr;
new_archetype.calculateHash();
}
var current_archetype_storage = archetype_entry.value_ptr;
// Copy to all component values for our entity from the old archetype storage (archetype)
// to the new one (current_archetype_storage).
const new_row = try current_archetype_storage.appendUndefined(entities.allocator);
const old_ptr = entities.entities.get(entity).?;
// Update the storage/columns for all of the existing components on the entity that exist in
// the new archetype table (i.e. excluding the component to remove.)
current_archetype_storage.set(entities.allocator, new_row, "id", entity);
for (current_archetype_storage.columns) |column| {
if (std.mem.eql(u8, column.name, "id")) continue;
for (archetype.columns) |corresponding| {
if (std.mem.eql(u8, column.name, corresponding.name)) {
const old_value_raw = archetype.getRaw(old_ptr.row_index, column.name);
current_archetype_storage.setRaw(new_row, column, old_value_raw) catch |err| {
current_archetype_storage.undoAppend();
return err;
};
break;
}
}
}
var swapped_entity_id = archetype.get(entities.allocator, old_ptr.row_index, "id", EntityID).?;
archetype.remove(old_ptr.row_index);
// TODO: try is wrong here and below?
try entities.entities.put(entities.allocator, swapped_entity_id, old_ptr);
try entities.entities.put(entities.allocator, entity, Pointer{
.archetype_index = @intCast(u16, archetype_entry.index),
.row_index = new_row,
});
}
// TODO: iteration over all entities
// TODO: iteration over all entities with components (U, V, ...)
// TODO: iteration over all entities with type T
// TODO: iteration over all entities with type T and components (U, V, ...)
// TODO: "indexes" - a few ideas we could express:
//
// * Graph relations index: e.g. parent-child entity relations for a DOM / UI / scene graph.
// * Spatial index: "give me all entities within 5 units distance from (x, y, z)"
// * Generic index: "give me all entities where arbitraryFunction(e) returns true"
//
// TODO: ability to remove archetype entirely, deleting all entities in it
// TODO: ability to remove archetypes with no entities (garbage collection)
};
test "entity ID size" {
try testing.expectEqual(8, @sizeOf(EntityID));
}
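// A small illustrative test, not in the original file: calculateHash XORs the
// per-column name hashes together, so an archetype's hash does not depend on
// the order of its columns.
test "archetype hash is order independent" {
const h_id = std.hash_map.hashString("id");
const h_location = std.hash_map.hashString("location");
try testing.expectEqual(h_id ^ h_location, h_location ^ h_id);
}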
test "example" {
const allocator = testing.allocator;
//-------------------------------------------------------------------------
// Create a world.
var world = try Entities.init(allocator);
defer world.deinit();
//-------------------------------------------------------------------------
// Define component types, any Zig type will do!
// A location component.
const Location = struct {
x: f32 = 0,
y: f32 = 0,
z: f32 = 0,
};
//-------------------------------------------------------------------------
// Create first player entity.
var player1 = try world.new();
try world.setComponent(player1, "name", "jane"); // add Name component
try world.setComponent(player1, "location", Location{}); // add Location component
// Create second player entity.
var player2 = try world.new();
try testing.expect(world.getComponent(player2, "location", Location) == null);
try testing.expect(world.getComponent(player2, "name", []const u8) == null);
//-------------------------------------------------------------------------
// We can add new components at will.
const Rotation = struct { degrees: f32 };
try world.setComponent(player2, "rotation", Rotation{ .degrees = 90 });
try testing.expect(world.getComponent(player1, "rotation", Rotation) == null); // player1 has no rotation
//-------------------------------------------------------------------------
// Remove a component from any entity at will.
// TODO: add a way to "cleanup" truly unused archetypes
try world.removeComponent(player1, "name");
try world.removeComponent(player1, "location");
try world.removeComponent(player1, "location"); // doesn't exist? no problem.
//-------------------------------------------------------------------------
// Introspect things.
//
// Archetype IDs, these are our "table names" - they're just hashes of all the component names
// within the archetype table.
var archetypes = world.archetypes.keys();
try testing.expectEqual(@as(usize, 6), archetypes.len);
try testing.expectEqual(@as(u64, void_archetype_hash), archetypes[0]);
try testing.expectEqual(@as(u64, 6893717443977936573), archetypes[1]);
try testing.expectEqual(@as(u64, 6672640730301731073), archetypes[2]);
try testing.expectEqual(@as(u64, 14420739110802803032), archetypes[3]);
try testing.expectEqual(@as(u64, 18216325908396511299), archetypes[4]);
try testing.expectEqual(@as(u64, 4457032469566706731), archetypes[5]);
// Number of (living) entities stored in an archetype table.
try testing.expectEqual(@as(usize, 0), world.archetypes.get(archetypes[0]).?.len);
try testing.expectEqual(@as(usize, 0), world.archetypes.get(archetypes[1]).?.len);
try testing.expectEqual(@as(usize, 0), world.archetypes.get(archetypes[2]).?.len);
try testing.expectEqual(@as(usize, 1), world.archetypes.get(archetypes[3]).?.len);
try testing.expectEqual(@as(usize, 0), world.archetypes.get(archetypes[4]).?.len);
try testing.expectEqual(@as(usize, 1), world.archetypes.get(archetypes[5]).?.len);
// Components for a given archetype.
var columns = world.archetypes.get(archetypes[2]).?.columns;
try testing.expectEqual(@as(usize, 3), columns.len);
try testing.expectEqualStrings("location", columns[0].name);
try testing.expectEqualStrings("id", columns[1].name);
try testing.expectEqualStrings("name", columns[2].name);
// Archetype resolved via entity ID
var player2_archetype = world.archetypeByID(player2);
try testing.expectEqual(@as(u64, 722178222806262412), player2_archetype.hash);
// TODO: iterating components an entity has not currently supported.
//-------------------------------------------------------------------------
// Remove an entity whenever you wish. Just be sure not to try and use it later!
try world.remove(player1);
}
// repo_path: ecs/src/entities.zig
const std = @import("std");
const testing = std.testing;
const assert = std.debug.assert;
const clang = @import("clang.zig");
const ctok = std.c.tokenizer;
const CToken = std.c.Token;
const mem = std.mem;
const math = std.math;
const meta = std.meta;
const ast = @import("translate_c/ast.zig");
const Node = ast.Node;
const Tag = Node.Tag;
const CallingConvention = std.builtin.CallingConvention;
pub const ClangErrMsg = clang.Stage2ErrorMsg;
pub const Error = std.mem.Allocator.Error;
const MacroProcessingError = Error || error{UnexpectedMacroToken};
const TypeError = Error || error{UnsupportedType};
const TransError = TypeError || error{UnsupportedTranslation};
const SymbolTable = std.StringArrayHashMap(Node);
const AliasList = std.ArrayList(struct {
alias: []const u8,
name: []const u8,
});
// Maps macro parameter names to token position, for determining if different
// identifiers refer to the same positional argument in different macros.
const ArgsPositionMap = std.StringArrayHashMapUnmanaged(usize);
const Scope = struct {
id: Id,
parent: ?*Scope,
const Id = enum {
block,
root,
condition,
loop,
do_loop,
};
/// Used for the scope of condition expressions, for example `if (cond)`.
/// The block is lazily initialised because it is only needed for rare
/// cases of comma operators being used.
const Condition = struct {
base: Scope,
block: ?Block = null,
fn getBlockScope(self: *Condition, c: *Context) !*Block {
if (self.block) |*b| return b;
self.block = try Block.init(c, &self.base, true);
return &self.block.?;
}
fn deinit(self: *Condition) void {
if (self.block) |*b| b.deinit();
}
};
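// For example (illustrative): translating `if (x++, x > 5)` needs a block
// scope so the comma-operator side effect can be emitted as a statement
// before the condition expression is evaluated.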
/// Represents an in-progress Node.Block. This struct is stack-allocated.
/// When it is deinitialized, it produces an Node.Block which is allocated
/// into the main arena.
const Block = struct {
base: Scope,
statements: std.ArrayList(Node),
variables: AliasList,
mangle_count: u32 = 0,
label: ?[]const u8 = null,
/// By default all variables are discarded, since we do not know in advance if they
/// will be used. This maps the variable's name to the Discard payload, so that if
/// the variable is subsequently referenced we can indicate that the discard should
/// be skipped during the intermediate AST -> Zig AST render step.
variable_discards: std.StringArrayHashMap(*ast.Payload.Discard),
/// When the block corresponds to a function, keep track of the return type
/// so that the return expression can be cast, if necessary
return_type: ?clang.QualType = null,
/// C static local variables are wrapped in a block-local struct. The struct
/// is named after the (mangled) variable name, the Zig variable within the
/// struct itself is given this name.
const StaticInnerName = "static";
fn init(c: *Context, parent: *Scope, labeled: bool) !Block {
var blk = Block{
.base = .{
.id = .block,
.parent = parent,
},
.statements = std.ArrayList(Node).init(c.gpa),
.variables = AliasList.init(c.gpa),
.variable_discards = std.StringArrayHashMap(*ast.Payload.Discard).init(c.gpa),
};
if (labeled) {
blk.label = try blk.makeMangledName(c, "blk");
}
return blk;
}
fn deinit(self: *Block) void {
self.statements.deinit();
self.variables.deinit();
self.variable_discards.deinit();
self.* = undefined;
}
fn complete(self: *Block, c: *Context) !Node {
if (self.base.parent.?.id == .do_loop) {
// We reserve 1 extra statement if the parent is a do_loop, because for
// do-while loops we want to append `if (cond) break;` at the end.
const alloc_len = self.statements.items.len + @boolToInt(self.base.parent.?.id == .do_loop);
var stmts = try c.arena.alloc(Node, alloc_len);
stmts.len = self.statements.items.len;
mem.copy(Node, stmts, self.statements.items);
return Tag.block.create(c.arena, .{
.label = self.label,
.stmts = stmts,
});
}
if (self.statements.items.len == 0) return Tag.empty_block.init();
return Tag.block.create(c.arena, .{
.label = self.label,
.stmts = try c.arena.dupe(Node, self.statements.items),
});
}
/// Given the desired name, return a name that does not shadow anything from outer scopes.
/// Inserts the returned name into the scope.
fn makeMangledName(scope: *Block, c: *Context, name: []const u8) ![]const u8 {
const name_copy = try c.arena.dupe(u8, name);
var proposed_name = name_copy;
while (scope.contains(proposed_name)) {
scope.mangle_count += 1;
proposed_name = try std.fmt.allocPrint(c.arena, "{s}_{d}", .{ name, scope.mangle_count });
}
try scope.variables.append(.{ .name = name_copy, .alias = proposed_name });
return proposed_name;
}
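// Illustrative example (assumed behavior): if an outer scope already declares
// `x`, translating a nested C declaration of `x` yields `x_1`; a further
// shadowing declaration in the same block yields `x_2`, and so on.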
fn getAlias(scope: *Block, name: []const u8) []const u8 {
for (scope.variables.items) |p| {
if (mem.eql(u8, p.name, name))
return p.alias;
}
return scope.base.parent.?.getAlias(name);
}
fn localContains(scope: *Block, name: []const u8) bool {
for (scope.variables.items) |p| {
if (mem.eql(u8, p.alias, name))
return true;
}
return false;
}
fn contains(scope: *Block, name: []const u8) bool {
if (scope.localContains(name))
return true;
return scope.base.parent.?.contains(name);
}
fn discardVariable(scope: *Block, c: *Context, name: []const u8) Error!void {
const name_node = try Tag.identifier.create(c.arena, name);
const discard = try Tag.discard.create(c.arena, .{ .should_skip = false, .value = name_node });
try scope.statements.append(discard);
try scope.variable_discards.putNoClobber(name, discard.castTag(.discard).?);
}
};
const Root = struct {
base: Scope,
sym_table: SymbolTable,
macro_table: SymbolTable,
context: *Context,
nodes: std.ArrayList(Node),
fn init(c: *Context) Root {
return .{
.base = .{
.id = .root,
.parent = null,
},
.sym_table = SymbolTable.init(c.gpa),
.macro_table = SymbolTable.init(c.gpa),
.context = c,
.nodes = std.ArrayList(Node).init(c.gpa),
};
}
fn deinit(scope: *Root) void {
scope.sym_table.deinit();
scope.macro_table.deinit();
scope.nodes.deinit();
}
/// Check if the global scope contains this name, without looking into the "future", i.e.
/// ignoring top-level names that were pre-scanned but not yet translated.
fn containsNow(scope: *Root, name: []const u8) bool {
return scope.sym_table.contains(name) or scope.macro_table.contains(name);
}
/// Check if the global scope contains the name, includes all decls that haven't been translated yet.
fn contains(scope: *Root, name: []const u8) bool {
return scope.containsNow(name) or scope.context.global_names.contains(name);
}
};
fn findBlockScope(inner: *Scope, c: *Context) !*Scope.Block {
var scope = inner;
while (true) {
switch (scope.id) {
.root => unreachable,
.block => return @fieldParentPtr(Block, "base", scope),
.condition => return @fieldParentPtr(Condition, "base", scope).getBlockScope(c),
else => scope = scope.parent.?,
}
}
}
fn findBlockReturnType(inner: *Scope, c: *Context) clang.QualType {
_ = c;
var scope = inner;
while (true) {
switch (scope.id) {
.root => unreachable,
.block => {
const block = @fieldParentPtr(Block, "base", scope);
if (block.return_type) |qt| return qt;
scope = scope.parent.?;
},
else => scope = scope.parent.?,
}
}
}
fn getAlias(scope: *Scope, name: []const u8) []const u8 {
return switch (scope.id) {
.root => return name,
.block => @fieldParentPtr(Block, "base", scope).getAlias(name),
.loop, .do_loop, .condition => scope.parent.?.getAlias(name),
};
}
fn contains(scope: *Scope, name: []const u8) bool {
return switch (scope.id) {
.root => @fieldParentPtr(Root, "base", scope).contains(name),
.block => @fieldParentPtr(Block, "base", scope).contains(name),
.loop, .do_loop, .condition => scope.parent.?.contains(name),
};
}
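/// Find the innermost loop scope that a C `break` or `continue` would target.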
fn getBreakableScope(inner: *Scope) *Scope {
var scope = inner;
while (true) {
switch (scope.id) {
.root => unreachable,
.loop, .do_loop => return scope,
else => scope = scope.parent.?,
}
}
}
/// Appends a node to the first block scope if inside a function, or to the root tree if not.
fn appendNode(inner: *Scope, node: Node) !void {
var scope = inner;
while (true) {
switch (scope.id) {
.root => {
const root = @fieldParentPtr(Root, "base", scope);
return root.nodes.append(node);
},
.block => {
const block = @fieldParentPtr(Block, "base", scope);
return block.statements.append(node);
},
else => scope = scope.parent.?,
}
}
}
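/// Mark the `_ = name;` discard emitted for this variable as skippable so it
/// is omitted from the output; called once the variable is actually referenced.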
fn skipVariableDiscard(inner: *Scope, name: []const u8) void {
var scope = inner;
while (true) {
switch (scope.id) {
.root => return,
.block => {
const block = @fieldParentPtr(Block, "base", scope);
if (block.variable_discards.get(name)) |discard| {
discard.data.should_skip = true;
return;
}
},
else => {},
}
scope = scope.parent.?;
}
}
};
pub const Context = struct {
gpa: *mem.Allocator,
arena: *mem.Allocator,
source_manager: *clang.SourceManager,
decl_table: std.AutoArrayHashMapUnmanaged(usize, []const u8) = .{},
alias_list: AliasList,
global_scope: *Scope.Root,
clang_context: *clang.ASTContext,
mangle_count: u32 = 0,
/// Table of record decls that have been demoted to opaques.
opaque_demotes: std.AutoHashMapUnmanaged(usize, void) = .{},
/// Table of unnamed enums and records that are child types of typedefs.
unnamed_typedefs: std.AutoHashMapUnmanaged(usize, []const u8) = .{},
/// Needed to decide if we are parsing a typename
typedefs: std.StringArrayHashMapUnmanaged(void) = .{},
/// This one is different from the root scope's name table. It contains
/// a list of names that we found by visiting all the top level decls without
/// translating them. The other maps are updated as we translate; this one is
/// populated up front in a pre-processing step.
global_names: std.StringArrayHashMapUnmanaged(void) = .{},
pattern_list: PatternList,
fn getMangle(c: *Context) u32 {
c.mangle_count += 1;
return c.mangle_count;
}
/// Convert a null-terminated C string to a slice allocated in the arena
fn str(c: *Context, s: [*:0]const u8) ![]u8 {
return mem.dupe(c.arena, u8, mem.spanZ(s));
}
/// Convert a clang source location to a file:line:column string
fn locStr(c: *Context, loc: clang.SourceLocation) ![]u8 {
const spelling_loc = c.source_manager.getSpellingLoc(loc);
const filename_c = c.source_manager.getFilename(spelling_loc);
const filename = if (filename_c) |s| try c.str(s) else @as([]const u8, "(no file)");
const line = c.source_manager.getSpellingLineNumber(spelling_loc);
const column = c.source_manager.getSpellingColumnNumber(spelling_loc);
return std.fmt.allocPrint(c.arena, "{s}:{d}:{d}", .{ filename, line, column });
}
};
pub fn translate(
gpa: *mem.Allocator,
args_begin: [*]?[*]const u8,
args_end: [*]?[*]const u8,
errors: *[]ClangErrMsg,
resources_path: [*:0]const u8,
) !std.zig.ast.Tree {
const ast_unit = clang.LoadFromCommandLine(
args_begin,
args_end,
&errors.ptr,
&errors.len,
resources_path,
) orelse {
if (errors.len == 0) return error.ASTUnitFailure;
return error.SemanticAnalyzeFail;
};
defer ast_unit.delete();
// For memory that has the same lifetime as the Tree that we return
// from this function.
var arena = std.heap.ArenaAllocator.init(gpa);
errdefer arena.deinit();
var context = Context{
.gpa = gpa,
.arena = &arena.allocator,
.source_manager = ast_unit.getSourceManager(),
.alias_list = AliasList.init(gpa),
.global_scope = try arena.allocator.create(Scope.Root),
.clang_context = ast_unit.getASTContext(),
.pattern_list = try PatternList.init(gpa),
};
context.global_scope.* = Scope.Root.init(&context);
defer {
context.decl_table.deinit(gpa);
context.alias_list.deinit();
context.global_names.deinit(gpa);
context.opaque_demotes.deinit(gpa);
context.unnamed_typedefs.deinit(gpa);
context.typedefs.deinit(gpa);
context.global_scope.deinit();
context.pattern_list.deinit(gpa);
}
try context.global_scope.nodes.append(Tag.usingnamespace_builtins.init());
try prepopulateGlobalNameTable(ast_unit, &context);
if (!ast_unit.visitLocalTopLevelDecls(&context, declVisitorC)) {
return error.OutOfMemory;
}
try transPreprocessorEntities(&context, ast_unit);
try addMacros(&context);
for (context.alias_list.items) |alias| {
if (!context.global_scope.sym_table.contains(alias.alias)) {
const node = try Tag.alias.create(context.arena, .{ .actual = alias.alias, .mangled = alias.name });
try addTopLevelDecl(&context, alias.alias, node);
}
}
return ast.render(gpa, context.global_scope.nodes.items);
}
fn prepopulateGlobalNameTable(ast_unit: *clang.ASTUnit, c: *Context) !void {
if (!ast_unit.visitLocalTopLevelDecls(c, declVisitorNamesOnlyC)) {
return error.OutOfMemory;
}
// TODO if we see #undef, delete it from the table
var it = ast_unit.getLocalPreprocessingEntities_begin();
const it_end = ast_unit.getLocalPreprocessingEntities_end();
while (it.I != it_end.I) : (it.I += 1) {
const entity = it.deref();
switch (entity.getKind()) {
.MacroDefinitionKind => {
const macro = @ptrCast(*clang.MacroDefinitionRecord, entity);
const raw_name = macro.getName_getNameStart();
const name = try c.str(raw_name);
try c.global_names.put(c.gpa, name, {});
},
else => {},
}
}
}
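/// C callback trampoline for clang's decl visitor. Errors are reported by
/// returning false, which the caller surfaces as error.OutOfMemory.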
fn declVisitorNamesOnlyC(context: ?*c_void, decl: *const clang.Decl) callconv(.C) bool {
const c = @ptrCast(*Context, @alignCast(@alignOf(Context), context));
declVisitorNamesOnly(c, decl) catch return false;
return true;
}
fn declVisitorC(context: ?*c_void, decl: *const clang.Decl) callconv(.C) bool {
const c = @ptrCast(*Context, @alignCast(@alignOf(Context), context));
declVisitor(c, decl) catch return false;
return true;
}
fn declVisitorNamesOnly(c: *Context, decl: *const clang.Decl) Error!void {
if (decl.castToNamedDecl()) |named_decl| {
const decl_name = try c.str(named_decl.getName_bytes_begin());
try c.global_names.put(c.gpa, decl_name, {});
// Check for typedefs with unnamed enum/record child types.
if (decl.getKind() == .Typedef) {
const typedef_decl = @ptrCast(*const clang.TypedefNameDecl, decl);
var child_ty = typedef_decl.getUnderlyingType().getTypePtr();
const addr: usize = while (true) switch (child_ty.getTypeClass()) {
.Enum => {
const enum_ty = @ptrCast(*const clang.EnumType, child_ty);
const enum_decl = enum_ty.getDecl();
// If the enum decl is named, it doesn't need the typedef's name; skip it.
if (@ptrCast(*const clang.NamedDecl, enum_decl).getName_bytes_begin()[0] != 0) return;
break @ptrToInt(enum_decl.getCanonicalDecl());
},
.Record => {
const record_ty = @ptrCast(*const clang.RecordType, child_ty);
const record_decl = record_ty.getDecl();
// If the record decl is named, it doesn't need the typedef's name; skip it.
if (@ptrCast(*const clang.NamedDecl, record_decl).getName_bytes_begin()[0] != 0) return;
break @ptrToInt(record_decl.getCanonicalDecl());
},
.Elaborated => {
const elaborated_ty = @ptrCast(*const clang.ElaboratedType, child_ty);
child_ty = elaborated_ty.getNamedType().getTypePtr();
},
.Decayed => {
const decayed_ty = @ptrCast(*const clang.DecayedType, child_ty);
child_ty = decayed_ty.getDecayedType().getTypePtr();
},
.Attributed => {
const attributed_ty = @ptrCast(*const clang.AttributedType, child_ty);
child_ty = attributed_ty.getEquivalentType().getTypePtr();
},
.MacroQualified => {
const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, child_ty);
child_ty = macroqualified_ty.getModifiedType().getTypePtr();
},
else => return,
} else unreachable;
const result = try c.unnamed_typedefs.getOrPut(c.gpa, addr);
if (result.found_existing) {
// One typedef can declare multiple names.
// Don't put this one in `decl_table` so it's processed later.
return;
}
result.value_ptr.* = decl_name;
// Put this typedef in the decl_table to avoid redefinitions.
try c.decl_table.putNoClobber(c.gpa, @ptrToInt(typedef_decl.getCanonicalDecl()), decl_name);
try c.typedefs.put(c.gpa, decl_name, {});
}
}
}
fn declVisitor(c: *Context, decl: *const clang.Decl) Error!void {
switch (decl.getKind()) {
.Function => {
return visitFnDecl(c, @ptrCast(*const clang.FunctionDecl, decl));
},
.Typedef => {
try transTypeDef(c, &c.global_scope.base, @ptrCast(*const clang.TypedefNameDecl, decl));
},
.Enum => {
try transEnumDecl(c, &c.global_scope.base, @ptrCast(*const clang.EnumDecl, decl));
},
.Record => {
try transRecordDecl(c, &c.global_scope.base, @ptrCast(*const clang.RecordDecl, decl));
},
.Var => {
return visitVarDecl(c, @ptrCast(*const clang.VarDecl, decl), null);
},
.Empty => {
// Do nothing
},
.FileScopeAsm => {
try transFileScopeAsm(c, &c.global_scope.base, @ptrCast(*const clang.FileScopeAsmDecl, decl));
},
else => {
const decl_name = try c.str(decl.getDeclKindName());
try warn(c, &c.global_scope.base, decl.getLocation(), "ignoring {s} declaration", .{decl_name});
},
}
}
fn transFileScopeAsm(c: *Context, scope: *Scope, file_scope_asm: *const clang.FileScopeAsmDecl) Error!void {
const asm_string = file_scope_asm.getAsmString();
var len: usize = undefined;
const bytes_ptr = asm_string.getString_bytes_begin_size(&len);
const str = try std.fmt.allocPrint(c.arena, "\"{}\"", .{std.zig.fmtEscapes(bytes_ptr[0..len])});
const str_node = try Tag.string_literal.create(c.arena, str);
const asm_node = try Tag.asm_simple.create(c.arena, str_node);
const block = try Tag.block_single.create(c.arena, asm_node);
const comptime_node = try Tag.@"comptime".create(c.arena, block);
try scope.appendNode(comptime_node);
}
fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
const fn_name = try c.str(@ptrCast(*const clang.NamedDecl, fn_decl).getName_bytes_begin());
if (c.global_scope.sym_table.contains(fn_name))
return; // Avoid processing this decl twice
// Skip this declaration if a proper definition exists
if (!fn_decl.isThisDeclarationADefinition()) {
if (fn_decl.getDefinition()) |def|
return visitFnDecl(c, def);
}
const fn_decl_loc = fn_decl.getLocation();
const has_body = fn_decl.hasBody();
const storage_class = fn_decl.getStorageClass();
var decl_ctx = FnDeclContext{
.fn_name = fn_name,
.has_body = has_body,
.storage_class = storage_class,
.is_export = switch (storage_class) {
.None => has_body and !fn_decl.isInlineSpecified(),
.Extern, .Static => false,
.PrivateExtern => return failDecl(c, fn_decl_loc, fn_name, "unsupported storage class: private extern", .{}),
.Auto => unreachable, // Not legal on functions
.Register => unreachable, // Not legal on functions
},
};
var fn_qt = fn_decl.getType();
const fn_type = while (true) {
const fn_type = fn_qt.getTypePtr();
switch (fn_type.getTypeClass()) {
.Attributed => {
const attr_type = @ptrCast(*const clang.AttributedType, fn_type);
fn_qt = attr_type.getEquivalentType();
},
.Paren => {
const paren_type = @ptrCast(*const clang.ParenType, fn_type);
fn_qt = paren_type.getInnerType();
},
else => break fn_type,
}
} else unreachable;
const fn_ty = @ptrCast(*const clang.FunctionType, fn_type);
const return_qt = fn_ty.getReturnType();
const proto_node = switch (fn_type.getTypeClass()) {
.FunctionProto => blk: {
const fn_proto_type = @ptrCast(*const clang.FunctionProtoType, fn_type);
if (has_body and fn_proto_type.isVariadic()) {
decl_ctx.has_body = false;
decl_ctx.storage_class = .Extern;
decl_ctx.is_export = false;
try warn(c, &c.global_scope.base, fn_decl_loc, "TODO unable to translate variadic function, demoted to extern", .{});
}
break :blk transFnProto(c, fn_decl, fn_proto_type, fn_decl_loc, decl_ctx, true) catch |err| switch (err) {
error.UnsupportedType => {
return failDecl(c, fn_decl_loc, fn_name, "unable to resolve prototype of function", .{});
},
error.OutOfMemory => |e| return e,
};
},
.FunctionNoProto => blk: {
const fn_no_proto_type = @ptrCast(*const clang.FunctionType, fn_type);
break :blk transFnNoProto(c, fn_no_proto_type, fn_decl_loc, decl_ctx, true) catch |err| switch (err) {
error.UnsupportedType => {
return failDecl(c, fn_decl_loc, fn_name, "unable to resolve prototype of function", .{});
},
error.OutOfMemory => |e| return e,
};
},
else => return failDecl(c, fn_decl_loc, fn_name, "unable to resolve function type {}", .{fn_type.getTypeClass()}),
};
if (!decl_ctx.has_body) {
return addTopLevelDecl(c, fn_name, Node.initPayload(&proto_node.base));
}
// actual function definition with body
const body_stmt = fn_decl.getBody();
var block_scope = try Scope.Block.init(c, &c.global_scope.base, false);
block_scope.return_type = return_qt;
defer block_scope.deinit();
var scope = &block_scope.base;
var param_id: c_uint = 0;
for (proto_node.data.params) |*param| {
const param_name = param.name orelse {
proto_node.data.is_extern = true;
proto_node.data.is_export = false;
try warn(c, &c.global_scope.base, fn_decl_loc, "function {s} parameter has no name, demoted to extern", .{fn_name});
return addTopLevelDecl(c, fn_name, Node.initPayload(&proto_node.base));
};
const c_param = fn_decl.getParamDecl(param_id);
const qual_type = c_param.getOriginalType();
const is_const = qual_type.isConstQualified();
const mangled_param_name = try block_scope.makeMangledName(c, param_name);
param.name = mangled_param_name;
if (!is_const) {
const bare_arg_name = try std.fmt.allocPrint(c.arena, "arg_{s}", .{mangled_param_name});
const arg_name = try block_scope.makeMangledName(c, bare_arg_name);
param.name = arg_name;
const redecl_node = try Tag.arg_redecl.create(c.arena, .{ .actual = mangled_param_name, .mangled = arg_name });
try block_scope.statements.append(redecl_node);
}
try block_scope.discardVariable(c, mangled_param_name);
param_id += 1;
}
const casted_body = @ptrCast(*const clang.CompoundStmt, body_stmt);
transCompoundStmtInline(c, casted_body, &block_scope) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.UnsupportedTranslation,
error.UnsupportedType,
=> {
proto_node.data.is_extern = true;
proto_node.data.is_export = false;
try warn(c, &c.global_scope.base, fn_decl_loc, "unable to translate function, demoted to extern", .{});
return addTopLevelDecl(c, fn_name, Node.initPayload(&proto_node.base));
},
};
// Add a return statement if the function body didn't end with one.
blk: {
const maybe_body = try block_scope.complete(c);
if (fn_ty.getNoReturnAttr() or isCVoid(return_qt) or maybe_body.isNoreturn(false)) {
proto_node.data.body = maybe_body;
break :blk;
}
const rhs = transZeroInitExpr(c, scope, fn_decl_loc, return_qt.getTypePtr()) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.UnsupportedTranslation,
error.UnsupportedType,
=> {
proto_node.data.is_extern = true;
proto_node.data.is_export = false;
try warn(c, &c.global_scope.base, fn_decl_loc, "unable to create a return value for function, demoted to extern", .{});
return addTopLevelDecl(c, fn_name, Node.initPayload(&proto_node.base));
},
};
const ret = try Tag.@"return".create(c.arena, rhs);
try block_scope.statements.append(ret);
proto_node.data.body = try block_scope.complete(c);
}
return addTopLevelDecl(c, fn_name, Node.initPayload(&proto_node.base));
}
fn transQualTypeMaybeInitialized(c: *Context, scope: *Scope, qt: clang.QualType, decl_init: ?*const clang.Expr, loc: clang.SourceLocation) TransError!Node {
return if (decl_init) |init_expr|
transQualTypeInitialized(c, scope, qt, init_expr, loc)
else
transQualType(c, scope, qt, loc);
}
/// This is used in global scope to convert a string literal `S` to [*c]u8:
/// &(struct {
/// var static = S.*;
/// }).static;
fn stringLiteralToCharStar(c: *Context, str: Node) Error!Node {
const var_name = Scope.Block.StaticInnerName;
const variables = try c.arena.alloc(Node, 1);
variables[0] = try Tag.mut_str.create(c.arena, .{ .name = var_name, .init = str });
const anon_struct = try Tag.@"struct".create(c.arena, .{
.layout = .none,
.fields = &.{},
.functions = &.{},
.variables = variables,
});
const member_access = try Tag.field_access.create(c.arena, .{
.lhs = anon_struct,
.field_name = var_name,
});
return Tag.address_of.create(c.arena, member_access);
}
/// If mangled_name is not null, this var decl was declared in a block scope.
fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]const u8) Error!void {
const var_name = mangled_name orelse try c.str(@ptrCast(*const clang.NamedDecl, var_decl).getName_bytes_begin());
if (c.global_scope.sym_table.contains(var_name))
return; // Avoid processing this decl twice
const is_pub = mangled_name == null;
const is_threadlocal = var_decl.getTLSKind() != .None;
const scope = &c.global_scope.base;
const var_decl_loc = var_decl.getLocation();
const qual_type = var_decl.getTypeSourceInfo_getType();
const storage_class = var_decl.getStorageClass();
const is_const = qual_type.isConstQualified();
const has_init = var_decl.hasInit();
const decl_init = var_decl.getInit();
// In C extern variables with initializers behave like Zig exports.
// extern int foo = 2;
// does the same as:
// extern int foo;
// int foo = 2;
var is_extern = storage_class == .Extern and !has_init;
var is_export = !is_extern and storage_class != .Static;
const type_node = transQualTypeMaybeInitialized(c, scope, qual_type, decl_init, var_decl_loc) catch |err| switch (err) {
error.UnsupportedTranslation, error.UnsupportedType => {
return failDecl(c, var_decl_loc, var_name, "unable to resolve variable type", .{});
},
error.OutOfMemory => |e| return e,
};
var init_node: ?Node = null;
// If the initialization expression is not present, initialize with undefined.
// If it is an integer literal, we can skip the @as since it will be redundant
// with the variable type.
if (has_init) trans_init: {
if (decl_init) |expr| {
const node_or_error = if (expr.getStmtClass() == .StringLiteralClass)
transStringLiteralInitializer(c, scope, @ptrCast(*const clang.StringLiteral, expr), type_node)
else
transExprCoercing(c, scope, expr, .used);
init_node = node_or_error catch |err| switch (err) {
error.UnsupportedTranslation,
error.UnsupportedType,
=> {
is_extern = true;
is_export = false;
try warn(c, scope, var_decl_loc, "unable to translate variable initializer, demoted to extern", .{});
break :trans_init;
},
error.OutOfMemory => |e| return e,
};
if (!qualTypeIsBoolean(qual_type) and isBoolRes(init_node.?)) {
init_node = try Tag.bool_to_int.create(c.arena, init_node.?);
} else if (init_node.?.tag() == .string_literal and qualTypeIsCharStar(qual_type)) {
init_node = try stringLiteralToCharStar(c, init_node.?);
}
} else {
init_node = Tag.undefined_literal.init();
}
} else if (storage_class != .Extern) {
// The C language specification states that variables with static or threadlocal
// storage without an initializer are initialized to a zero value.
// @import("std").mem.zeroes(T)
init_node = try Tag.std_mem_zeroes.create(c.arena, type_node);
}
const linksection_string = blk: {
var str_len: usize = undefined;
if (var_decl.getSectionAttribute(&str_len)) |str_ptr| {
break :blk str_ptr[0..str_len];
}
break :blk null;
};
const node = try Tag.var_decl.create(c.arena, .{
.is_pub = is_pub,
.is_const = is_const,
.is_extern = is_extern,
.is_export = is_export,
.is_threadlocal = is_threadlocal,
.linksection_string = linksection_string,
.alignment = zigAlignment(var_decl.getAlignedAttribute(c.clang_context)),
.name = var_name,
.type = type_node,
.init = init_node,
});
return addTopLevelDecl(c, var_name, node);
}
const builtin_typedef_map = std.ComptimeStringMap([]const u8, .{
.{ "uint8_t", "u8" },
.{ "int8_t", "i8" },
.{ "uint16_t", "u16" },
.{ "int16_t", "i16" },
.{ "uint32_t", "u32" },
.{ "int32_t", "i32" },
.{ "uint64_t", "u64" },
.{ "int64_t", "i64" },
.{ "intptr_t", "isize" },
.{ "uintptr_t", "usize" },
.{ "ssize_t", "isize" },
.{ "size_t", "usize" },
});
fn transTypeDef(c: *Context, scope: *Scope, typedef_decl: *const clang.TypedefNameDecl) Error!void {
if (c.decl_table.get(@ptrToInt(typedef_decl.getCanonicalDecl()))) |_|
return; // Avoid processing this decl twice
const toplevel = scope.id == .root;
const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
var name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, typedef_decl).getName_bytes_begin());
try c.typedefs.put(c.gpa, name, {});
if (builtin_typedef_map.get(name)) |builtin| {
return c.decl_table.putNoClobber(c.gpa, @ptrToInt(typedef_decl.getCanonicalDecl()), builtin);
}
if (!toplevel) name = try bs.makeMangledName(c, name);
try c.decl_table.putNoClobber(c.gpa, @ptrToInt(typedef_decl.getCanonicalDecl()), name);
const child_qt = typedef_decl.getUnderlyingType();
const typedef_loc = typedef_decl.getLocation();
const init_node = transQualType(c, scope, child_qt, typedef_loc) catch |err| switch (err) {
error.UnsupportedType => {
return failDecl(c, typedef_loc, name, "unable to resolve typedef child type", .{});
},
error.OutOfMemory => |e| return e,
};
const payload = try c.arena.create(ast.Payload.SimpleVarDecl);
payload.* = .{
.base = .{ .tag = ([2]Tag{ .var_simple, .pub_var_simple })[@boolToInt(toplevel)] },
.data = .{
.name = name,
.init = init_node,
},
};
const node = Node.initPayload(&payload.base);
if (toplevel) {
try addTopLevelDecl(c, name, node);
} else {
try scope.appendNode(node);
if (node.tag() != .pub_var_simple) {
try bs.discardVariable(c, name);
}
}
}
/// Build a getter function for a flexible array member at the end of a C struct,
/// e.g. `T items[]` or `T items[0]`. The generated function returns a [*c] pointer
/// to the flexible array with the correct const and volatile qualifiers.
fn buildFlexibleArrayFn(
c: *Context,
scope: *Scope,
layout: *const clang.ASTRecordLayout,
field_name: []const u8,
field_decl: *const clang.FieldDecl,
) TypeError!Node {
const field_qt = field_decl.getType();
const u8_type = try Tag.type.create(c.arena, "u8");
const self_param_name = "self";
const self_param = try Tag.identifier.create(c.arena, self_param_name);
const self_type = try Tag.typeof.create(c.arena, self_param);
const fn_params = try c.arena.alloc(ast.Payload.Param, 1);
fn_params[0] = .{
.name = self_param_name,
.type = Tag.@"anytype".init(),
.is_noalias = false,
};
const array_type = @ptrCast(*const clang.ArrayType, field_qt.getTypePtr());
const element_qt = array_type.getElementType();
const element_type = try transQualType(c, scope, element_qt, field_decl.getLocation());
var block_scope = try Scope.Block.init(c, scope, false);
defer block_scope.deinit();
const intermediate_type_name = try block_scope.makeMangledName(c, "Intermediate");
const intermediate_type = try Tag.helpers_flexible_array_type.create(c.arena, .{ .lhs = self_type, .rhs = u8_type });
const intermediate_type_decl = try Tag.var_simple.create(c.arena, .{
.name = intermediate_type_name,
.init = intermediate_type,
});
try block_scope.statements.append(intermediate_type_decl);
const intermediate_type_ident = try Tag.identifier.create(c.arena, intermediate_type_name);
const return_type_name = try block_scope.makeMangledName(c, "ReturnType");
const return_type = try Tag.helpers_flexible_array_type.create(c.arena, .{ .lhs = self_type, .rhs = element_type });
const return_type_decl = try Tag.var_simple.create(c.arena, .{
.name = return_type_name,
.init = return_type,
});
try block_scope.statements.append(return_type_decl);
const return_type_ident = try Tag.identifier.create(c.arena, return_type_name);
const field_index = field_decl.getFieldIndex();
const bit_offset = layout.getFieldOffset(field_index); // this is a target-specific constant based on the struct layout
const byte_offset = bit_offset / 8;
const casted_self = try Tag.ptr_cast.create(c.arena, .{
.lhs = intermediate_type_ident,
.rhs = self_param,
});
const field_offset = try transCreateNodeNumber(c, byte_offset, .int);
const field_ptr = try Tag.add.create(c.arena, .{ .lhs = casted_self, .rhs = field_offset });
const alignment = try Tag.alignof.create(c.arena, element_type);
const ptr_val = try Tag.align_cast.create(c.arena, .{ .lhs = alignment, .rhs = field_ptr });
const ptr_cast = try Tag.ptr_cast.create(c.arena, .{ .lhs = return_type_ident, .rhs = ptr_val });
const return_stmt = try Tag.@"return".create(c.arena, ptr_cast);
try block_scope.statements.append(return_stmt);
const payload = try c.arena.create(ast.Payload.Func);
payload.* = .{
.base = .{ .tag = .func },
.data = .{
.is_pub = true,
.is_extern = false,
.is_export = false,
.is_var_args = false,
.name = field_name,
.linksection_string = null,
.explicit_callconv = null,
.params = fn_params,
.return_type = return_type,
.body = try block_scope.complete(c),
.alignment = null,
},
};
return Node.initPayload(&payload.base);
}
fn isFlexibleArrayFieldDecl(c: *Context, field_decl: *const clang.FieldDecl) bool {
return qualTypeCanon(field_decl.getType()).isIncompleteOrZeroLengthArrayType(c.clang_context);
}
/// clang's RecordDecl::hasFlexibleArrayMember is not suitable for determining
/// this because it returns false for a record that ends with a zero-length
/// array, but we consider those to be flexible arrays.
fn hasFlexibleArrayField(c: *Context, record_def: *const clang.RecordDecl) bool {
var it = record_def.field_begin();
const end_it = record_def.field_end();
while (it.neq(end_it)) : (it = it.next()) {
const field_decl = it.deref();
if (isFlexibleArrayFieldDecl(c, field_decl)) return true;
}
return false;
}
fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordDecl) Error!void {
if (c.decl_table.get(@ptrToInt(record_decl.getCanonicalDecl()))) |_|
return; // Avoid processing this decl twice
const record_loc = record_decl.getLocation();
const toplevel = scope.id == .root;
const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
var is_union = false;
var container_kind_name: []const u8 = undefined;
var bare_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, record_decl).getName_bytes_begin());
if (record_decl.isUnion()) {
container_kind_name = "union";
is_union = true;
} else if (record_decl.isStruct()) {
container_kind_name = "struct";
} else {
try c.decl_table.putNoClobber(c.gpa, @ptrToInt(record_decl.getCanonicalDecl()), bare_name);
return failDecl(c, record_loc, bare_name, "record {s} is not a struct or union", .{bare_name});
}
var is_unnamed = false;
var name = bare_name;
if (c.unnamed_typedefs.get(@ptrToInt(record_decl.getCanonicalDecl()))) |typedef_name| {
bare_name = typedef_name;
name = typedef_name;
} else {
// Record declarations such as `struct {...} x;` have no name but are not
// anonymous, so isAnonymousStructOrUnion is not needed here.
if (bare_name.len == 0) {
bare_name = try std.fmt.allocPrint(c.arena, "unnamed_{d}", .{c.getMangle()});
is_unnamed = true;
}
name = try std.fmt.allocPrint(c.arena, "{s}_{s}", .{ container_kind_name, bare_name });
}
if (!toplevel) name = try bs.makeMangledName(c, name);
try c.decl_table.putNoClobber(c.gpa, @ptrToInt(record_decl.getCanonicalDecl()), name);
const is_pub = toplevel and !is_unnamed;
const init_node = blk: {
const record_def = record_decl.getDefinition() orelse {
try c.opaque_demotes.put(c.gpa, @ptrToInt(record_decl.getCanonicalDecl()), {});
break :blk Tag.opaque_literal.init();
};
const is_packed = record_decl.getPackedAttribute();
var fields = std.ArrayList(ast.Payload.Record.Field).init(c.gpa);
defer fields.deinit();
var functions = std.ArrayList(Node).init(c.gpa);
defer functions.deinit();
const has_flexible_array = hasFlexibleArrayField(c, record_def);
var unnamed_field_count: u32 = 0;
var it = record_def.field_begin();
const end_it = record_def.field_end();
const layout = record_def.getASTRecordLayout(c.clang_context);
const record_alignment = layout.getAlignment();
while (it.neq(end_it)) : (it = it.next()) {
const field_decl = it.deref();
const field_loc = field_decl.getLocation();
const field_qt = field_decl.getType();
if (field_decl.isBitField()) {
try c.opaque_demotes.put(c.gpa, @ptrToInt(record_decl.getCanonicalDecl()), {});
try warn(c, scope, field_loc, "{s} demoted to opaque type - has bitfield", .{container_kind_name});
break :blk Tag.opaque_literal.init();
}
var is_anon = false;
var field_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin());
if (field_decl.isAnonymousStructOrUnion() or field_name.len == 0) {
// Context.getMangle() is not used here because doing so causes unpredictable field names for anonymous fields.
field_name = try std.fmt.allocPrint(c.arena, "unnamed_{d}", .{unnamed_field_count});
unnamed_field_count += 1;
is_anon = true;
}
if (isFlexibleArrayFieldDecl(c, field_decl)) {
const flexible_array_fn = buildFlexibleArrayFn(c, scope, layout, field_name, field_decl) catch |err| switch (err) {
error.UnsupportedType => {
try c.opaque_demotes.put(c.gpa, @ptrToInt(record_decl.getCanonicalDecl()), {});
try warn(c, scope, record_loc, "{s} demoted to opaque type - unable to translate type of flexible array field {s}", .{ container_kind_name, field_name });
break :blk Tag.opaque_literal.init();
},
else => |e| return e,
};
try functions.append(flexible_array_fn);
continue;
}
const field_type = transQualType(c, scope, field_qt, field_loc) catch |err| switch (err) {
error.UnsupportedType => {
try c.opaque_demotes.put(c.gpa, @ptrToInt(record_decl.getCanonicalDecl()), {});
try warn(c, scope, record_loc, "{s} demoted to opaque type - unable to translate type of field {s}", .{ container_kind_name, field_name });
break :blk Tag.opaque_literal.init();
},
else => |e| return e,
};
const alignment = if (has_flexible_array and field_decl.getFieldIndex() == 0)
@intCast(c_uint, record_alignment)
else
zigAlignment(field_decl.getAlignedAttribute(c.clang_context));
if (is_anon) {
try c.decl_table.putNoClobber(c.gpa, @ptrToInt(field_decl.getCanonicalDecl()), field_name);
}
try fields.append(.{
.name = field_name,
.type = field_type,
.alignment = alignment,
});
}
const record_payload = try c.arena.create(ast.Payload.Record);
record_payload.* = .{
.base = .{ .tag = ([2]Tag{ .@"struct", .@"union" })[@boolToInt(is_union)] },
.data = .{
.layout = if (is_packed) .@"packed" else .@"extern",
.fields = try c.arena.dupe(ast.Payload.Record.Field, fields.items),
.functions = try c.arena.dupe(Node, functions.items),
.variables = &.{},
},
};
break :blk Node.initPayload(&record_payload.base);
};
const payload = try c.arena.create(ast.Payload.SimpleVarDecl);
payload.* = .{
.base = .{ .tag = ([2]Tag{ .var_simple, .pub_var_simple })[@boolToInt(is_pub)] },
.data = .{
.name = name,
.init = init_node,
},
};
const node = Node.initPayload(&payload.base);
if (toplevel) {
try addTopLevelDecl(c, name, node);
if (!is_unnamed)
try c.alias_list.append(.{ .alias = bare_name, .name = name });
} else {
try scope.appendNode(node);
if (node.tag() != .pub_var_simple) {
try bs.discardVariable(c, name);
}
}
}
fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const clang.EnumDecl) Error!void {
if (c.decl_table.get(@ptrToInt(enum_decl.getCanonicalDecl()))) |_|
return; // Avoid processing this decl twice
const enum_loc = enum_decl.getLocation();
const toplevel = scope.id == .root;
const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
var is_unnamed = false;
var bare_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, enum_decl).getName_bytes_begin());
var name = bare_name;
if (c.unnamed_typedefs.get(@ptrToInt(enum_decl.getCanonicalDecl()))) |typedef_name| {
bare_name = typedef_name;
name = typedef_name;
} else {
if (bare_name.len == 0) {
bare_name = try std.fmt.allocPrint(c.arena, "unnamed_{d}", .{c.getMangle()});
is_unnamed = true;
}
name = try std.fmt.allocPrint(c.arena, "enum_{s}", .{bare_name});
}
if (!toplevel) name = try bs.makeMangledName(c, name);
try c.decl_table.putNoClobber(c.gpa, @ptrToInt(enum_decl.getCanonicalDecl()), name);
const enum_type_node = if (enum_decl.getDefinition()) |enum_def| blk: {
var it = enum_def.enumerator_begin();
const end_it = enum_def.enumerator_end();
while (it.neq(end_it)) : (it = it.next()) {
const enum_const = it.deref();
var enum_val_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, enum_const).getName_bytes_begin());
if (!toplevel) {
enum_val_name = try bs.makeMangledName(c, enum_val_name);
}
const enum_const_qt = @ptrCast(*const clang.ValueDecl, enum_const).getType();
const enum_const_loc = @ptrCast(*const clang.Decl, enum_const).getLocation();
const enum_const_type_node: ?Node = transQualType(c, scope, enum_const_qt, enum_const_loc) catch |err| switch (err) {
error.UnsupportedType => null,
else => |e| return e,
};
const enum_const_def = try Tag.enum_constant.create(c.arena, .{
.name = enum_val_name,
.is_public = toplevel,
.type = enum_const_type_node,
.value = try transCreateNodeAPInt(c, enum_const.getInitVal()),
});
if (toplevel)
try addTopLevelDecl(c, enum_val_name, enum_const_def)
else {
try scope.appendNode(enum_const_def);
try bs.discardVariable(c, enum_val_name);
}
}
const int_type = enum_decl.getIntegerType();
// The underlying type may be null in the case of forward-declared enum
// types. While that is not ISO C compliant, many compilers allow it and
// fall back to the integer type they use for all enums. We default to
// c_int because MSVC and GCC default to different types.
break :blk if (int_type.ptr != null)
transQualType(c, scope, int_type, enum_loc) catch |err| switch (err) {
error.UnsupportedType => {
return failDecl(c, enum_loc, name, "unable to translate enum integer type", .{});
},
else => |e| return e,
}
else
try Tag.type.create(c.arena, "c_int");
} else blk: {
try c.opaque_demotes.put(c.gpa, @ptrToInt(enum_decl.getCanonicalDecl()), {});
break :blk Tag.opaque_literal.init();
};
const is_pub = toplevel and !is_unnamed;
const payload = try c.arena.create(ast.Payload.SimpleVarDecl);
payload.* = .{
.base = .{ .tag = ([2]Tag{ .var_simple, .pub_var_simple })[@boolToInt(is_pub)] },
.data = .{
.init = enum_type_node,
.name = name,
},
};
const node = Node.initPayload(&payload.base);
if (toplevel) {
try addTopLevelDecl(c, name, node);
if (!is_unnamed)
try c.alias_list.append(.{ .alias = bare_name, .name = name });
} else {
try scope.appendNode(node);
if (node.tag() != .pub_var_simple) {
try bs.discardVariable(c, name);
}
}
}
const ResultUsed = enum {
used,
unused,
};
fn transStmt(
c: *Context,
scope: *Scope,
stmt: *const clang.Stmt,
result_used: ResultUsed,
) TransError!Node {
const sc = stmt.getStmtClass();
switch (sc) {
.BinaryOperatorClass => return transBinaryOperator(c, scope, @ptrCast(*const clang.BinaryOperator, stmt), result_used),
.CompoundStmtClass => return transCompoundStmt(c, scope, @ptrCast(*const clang.CompoundStmt, stmt)),
.CStyleCastExprClass => return transCStyleCastExprClass(c, scope, @ptrCast(*const clang.CStyleCastExpr, stmt), result_used),
.DeclStmtClass => return transDeclStmt(c, scope, @ptrCast(*const clang.DeclStmt, stmt)),
.DeclRefExprClass => return transDeclRefExpr(c, scope, @ptrCast(*const clang.DeclRefExpr, stmt)),
.ImplicitCastExprClass => return transImplicitCastExpr(c, scope, @ptrCast(*const clang.ImplicitCastExpr, stmt), result_used),
.IntegerLiteralClass => return transIntegerLiteral(c, scope, @ptrCast(*const clang.IntegerLiteral, stmt), result_used, .with_as),
.ReturnStmtClass => return transReturnStmt(c, scope, @ptrCast(*const clang.ReturnStmt, stmt)),
.StringLiteralClass => return transStringLiteral(c, scope, @ptrCast(*const clang.StringLiteral, stmt), result_used),
.ParenExprClass => {
const expr = try transExpr(c, scope, @ptrCast(*const clang.ParenExpr, stmt).getSubExpr(), .used);
return maybeSuppressResult(c, scope, result_used, expr);
},
.InitListExprClass => return transInitListExpr(c, scope, @ptrCast(*const clang.InitListExpr, stmt), result_used),
.ImplicitValueInitExprClass => return transImplicitValueInitExpr(c, scope, @ptrCast(*const clang.Expr, stmt), result_used),
.IfStmtClass => return transIfStmt(c, scope, @ptrCast(*const clang.IfStmt, stmt)),
.WhileStmtClass => return transWhileLoop(c, scope, @ptrCast(*const clang.WhileStmt, stmt)),
.DoStmtClass => return transDoWhileLoop(c, scope, @ptrCast(*const clang.DoStmt, stmt)),
.NullStmtClass => {
return Tag.empty_block.init();
},
.ContinueStmtClass => return Tag.@"continue".init(),
.BreakStmtClass => return Tag.@"break".init(),
.ForStmtClass => return transForLoop(c, scope, @ptrCast(*const clang.ForStmt, stmt)),
.FloatingLiteralClass => return transFloatingLiteral(c, scope, @ptrCast(*const clang.FloatingLiteral, stmt), result_used),
.ConditionalOperatorClass => {
return transConditionalOperator(c, scope, @ptrCast(*const clang.ConditionalOperator, stmt), result_used);
},
.BinaryConditionalOperatorClass => {
return transBinaryConditionalOperator(c, scope, @ptrCast(*const clang.BinaryConditionalOperator, stmt), result_used);
},
.SwitchStmtClass => return transSwitch(c, scope, @ptrCast(*const clang.SwitchStmt, stmt)),
.CaseStmtClass, .DefaultStmtClass => {
return fail(c, error.UnsupportedTranslation, stmt.getBeginLoc(), "TODO complex switch", .{});
},
.ConstantExprClass => return transConstantExpr(c, scope, @ptrCast(*const clang.Expr, stmt), result_used),
.PredefinedExprClass => return transPredefinedExpr(c, scope, @ptrCast(*const clang.PredefinedExpr, stmt), result_used),
.CharacterLiteralClass => return transCharLiteral(c, scope, @ptrCast(*const clang.CharacterLiteral, stmt), result_used, .with_as),
.StmtExprClass => return transStmtExpr(c, scope, @ptrCast(*const clang.StmtExpr, stmt), result_used),
.MemberExprClass => return transMemberExpr(c, scope, @ptrCast(*const clang.MemberExpr, stmt), result_used),
.ArraySubscriptExprClass => return transArrayAccess(c, scope, @ptrCast(*const clang.ArraySubscriptExpr, stmt), result_used),
.CallExprClass => return transCallExpr(c, scope, @ptrCast(*const clang.CallExpr, stmt), result_used),
.UnaryExprOrTypeTraitExprClass => return transUnaryExprOrTypeTraitExpr(c, scope, @ptrCast(*const clang.UnaryExprOrTypeTraitExpr, stmt), result_used),
.UnaryOperatorClass => return transUnaryOperator(c, scope, @ptrCast(*const clang.UnaryOperator, stmt), result_used),
.CompoundAssignOperatorClass => return transCompoundAssignOperator(c, scope, @ptrCast(*const clang.CompoundAssignOperator, stmt), result_used),
.OpaqueValueExprClass => {
const source_expr = @ptrCast(*const clang.OpaqueValueExpr, stmt).getSourceExpr().?;
const expr = try transExpr(c, scope, source_expr, .used);
return maybeSuppressResult(c, scope, result_used, expr);
},
.OffsetOfExprClass => return transOffsetOfExpr(c, scope, @ptrCast(*const clang.OffsetOfExpr, stmt), result_used),
.CompoundLiteralExprClass => {
const compound_literal = @ptrCast(*const clang.CompoundLiteralExpr, stmt);
return transExpr(c, scope, compound_literal.getInitializer(), result_used);
},
.GenericSelectionExprClass => {
const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, stmt);
return transExpr(c, scope, gen_sel.getResultExpr(), result_used);
},
.ConvertVectorExprClass => {
const conv_vec = @ptrCast(*const clang.ConvertVectorExpr, stmt);
const conv_vec_node = try transConvertVectorExpr(c, scope, stmt.getBeginLoc(), conv_vec);
return maybeSuppressResult(c, scope, result_used, conv_vec_node);
},
.ShuffleVectorExprClass => {
const shuffle_vec_expr = @ptrCast(*const clang.ShuffleVectorExpr, stmt);
const shuffle_vec_node = try transShuffleVectorExpr(c, scope, shuffle_vec_expr);
return maybeSuppressResult(c, scope, result_used, shuffle_vec_node);
},
.ChooseExprClass => {
const choose_expr = @ptrCast(*const clang.ChooseExpr, stmt);
return transExpr(c, scope, choose_expr.getChosenSubExpr(), result_used);
},
// When adding new cases here, see comment for maybeBlockify()
.GCCAsmStmtClass,
.GotoStmtClass,
.IndirectGotoStmtClass,
.AttributedStmtClass,
.AddrLabelExprClass,
.AtomicExprClass,
.BlockExprClass,
.UserDefinedLiteralClass,
.BuiltinBitCastExprClass,
.DesignatedInitExprClass,
.LabelStmtClass,
=> return fail(c, error.UnsupportedTranslation, stmt.getBeginLoc(), "TODO implement translation of stmt class {s}", .{@tagName(sc)}),
else => return fail(c, error.UnsupportedTranslation, stmt.getBeginLoc(), "unsupported stmt class {s}", .{@tagName(sc)}),
}
}
/// See https://clang.llvm.org/docs/LanguageExtensions.html#langext-builtin-convertvector
fn transConvertVectorExpr(
c: *Context,
scope: *Scope,
source_loc: clang.SourceLocation,
expr: *const clang.ConvertVectorExpr,
) TransError!Node {
_ = source_loc;
const base_stmt = @ptrCast(*const clang.Stmt, expr);
var block_scope = try Scope.Block.init(c, scope, true);
defer block_scope.deinit();
const src_expr = expr.getSrcExpr();
const src_type = qualTypeCanon(src_expr.getType());
const src_vector_ty = @ptrCast(*const clang.VectorType, src_type);
const src_element_qt = src_vector_ty.getElementType();
const src_expr_node = try transExpr(c, &block_scope.base, src_expr, .used);
const dst_qt = expr.getTypeSourceInfo_getType();
const dst_type_node = try transQualType(c, &block_scope.base, dst_qt, base_stmt.getBeginLoc());
const dst_vector_ty = @ptrCast(*const clang.VectorType, qualTypeCanon(dst_qt));
const num_elements = dst_vector_ty.getNumElements();
const dst_element_qt = dst_vector_ty.getElementType();
// Workaround for https://github.com/ziglang/zig/issues/8322:
// we store the casted results in temp variables and use those
// to initialize the vector. Eventually we can construct the
// init_list directly from the casted source members.
var i: usize = 0;
while (i < num_elements) : (i += 1) {
const mangled_name = try block_scope.makeMangledName(c, "tmp");
const value = try Tag.array_access.create(c.arena, .{
.lhs = src_expr_node,
.rhs = try transCreateNodeNumber(c, i, .int),
});
const tmp_decl_node = try Tag.var_simple.create(c.arena, .{
.name = mangled_name,
.init = try transCCast(c, &block_scope.base, base_stmt.getBeginLoc(), dst_element_qt, src_element_qt, value),
});
try block_scope.statements.append(tmp_decl_node);
}
const init_list = try c.arena.alloc(Node, num_elements);
for (init_list) |*init, init_index| {
const tmp_decl = block_scope.statements.items[init_index];
const name = tmp_decl.castTag(.var_simple).?.data.name;
init.* = try Tag.identifier.create(c.arena, name);
}
const vec_init = try Tag.array_init.create(c.arena, .{
.cond = dst_type_node,
.cases = init_list,
});
const break_node = try Tag.break_val.create(c.arena, .{
.label = block_scope.label,
.val = vec_init,
});
try block_scope.statements.append(break_node);
return block_scope.complete(c);
}
fn makeShuffleMask(c: *Context, scope: *Scope, expr: *const clang.ShuffleVectorExpr, vector_len: Node) TransError!Node {
const num_subexprs = expr.getNumSubExprs();
assert(num_subexprs >= 3); // two source vectors + at least 1 index expression
const mask_len = num_subexprs - 2;
const mask_type = try Tag.std_meta_vector.create(c.arena, .{
.lhs = try transCreateNodeNumber(c, mask_len, .int),
.rhs = try Tag.type.create(c.arena, "i32"),
});
const init_list = try c.arena.alloc(Node, mask_len);
for (init_list) |*init, i| {
const index_expr = try transExprCoercing(c, scope, expr.getExpr(@intCast(c_uint, i + 2)), .used);
const converted_index = try Tag.helpers_shuffle_vector_index.create(c.arena, .{ .lhs = index_expr, .rhs = vector_len });
init.* = converted_index;
}
return Tag.array_init.create(c.arena, .{
.cond = mask_type,
.cases = init_list,
});
}
/// @typeInfo(@TypeOf(vec_node)).Vector.<field>
fn vectorTypeInfo(arena: *mem.Allocator, vec_node: Node, field: []const u8) TransError!Node {
const typeof_call = try Tag.typeof.create(arena, vec_node);
const typeinfo_call = try Tag.typeinfo.create(arena, typeof_call);
const vector_type_info = try Tag.field_access.create(arena, .{ .lhs = typeinfo_call, .field_name = "Vector" });
return Tag.field_access.create(arena, .{ .lhs = vector_type_info, .field_name = field });
}
fn transShuffleVectorExpr(
c: *Context,
scope: *Scope,
expr: *const clang.ShuffleVectorExpr,
) TransError!Node {
const base_expr = @ptrCast(*const clang.Expr, expr);
const num_subexprs = expr.getNumSubExprs();
if (num_subexprs < 3) return fail(c, error.UnsupportedTranslation, base_expr.getBeginLoc(), "ShuffleVector needs at least 1 index", .{});
const a = try transExpr(c, scope, expr.getExpr(0), .used);
const b = try transExpr(c, scope, expr.getExpr(1), .used);
// clang requires the first two arguments to __builtin_shufflevector to be of the same type
const vector_child_type = try vectorTypeInfo(c.arena, a, "child");
const vector_len = try vectorTypeInfo(c.arena, a, "len");
const shuffle_mask = try makeShuffleMask(c, scope, expr, vector_len);
return Tag.shuffle.create(c.arena, .{
.element_type = vector_child_type,
.a = a,
.b = b,
.mask_vector = shuffle_mask,
});
}
/// Translate a "simple" offsetof expression containing exactly one component,
/// when that component is of kind .Field, e.g. `offsetof(mytype, myfield)`.
fn transSimpleOffsetOfExpr(
c: *Context,
scope: *Scope,
expr: *const clang.OffsetOfExpr,
) TransError!Node {
_ = scope;
assert(expr.getNumComponents() == 1);
const component = expr.getComponent(0);
if (component.getKind() == .Field) {
const field_decl = component.getField();
if (field_decl.getParent()) |record_decl| {
if (c.decl_table.get(@ptrToInt(record_decl.getCanonicalDecl()))) |type_name| {
const type_node = try Tag.type.create(c.arena, type_name);
var raw_field_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin());
const quoted_field_name = try std.fmt.allocPrint(c.arena, "\"{s}\"", .{raw_field_name});
const field_name_node = try Tag.string_literal.create(c.arena, quoted_field_name);
return Tag.offset_of.create(c.arena, .{
.lhs = type_node,
.rhs = field_name_node,
});
}
}
}
return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "failed to translate simple OffsetOfExpr", .{});
}
fn transOffsetOfExpr(
c: *Context,
scope: *Scope,
expr: *const clang.OffsetOfExpr,
result_used: ResultUsed,
) TransError!Node {
if (expr.getNumComponents() == 1) {
const offsetof_expr = try transSimpleOffsetOfExpr(c, scope, expr);
return maybeSuppressResult(c, scope, result_used, offsetof_expr);
}
// TODO implement OffsetOfExpr with more than 1 component
// OffsetOfExpr API:
// call expr.getComponent(idx) while idx < expr.getNumComponents()
// component.getKind() will be either .Array or .Field (other kinds are C++-only)
// if .Field, use component.getField() to retrieve *clang.FieldDecl
// if .Array, use component.getArrayExprIndex() to get a c_uint which
// can be passed to expr.getIndexExpr(expr_index) to get the *clang.Expr for the array index
return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "TODO: implement complex OffsetOfExpr translation", .{});
}
/// Cast a signed integer node to a usize, for use in pointer arithmetic. Negative
/// numbers become very large positive numbers, but that is fine since we only use
/// this in pointer arithmetic expressions, where wraparound ensures we get the
/// correct value.
/// node -> @bitCast(usize, @intCast(isize, node))
fn usizeCastForWrappingPtrArithmetic(gpa: *mem.Allocator, node: Node) TransError!Node {
const intcast_node = try Tag.int_cast.create(gpa, .{
.lhs = try Tag.type.create(gpa, "isize"),
.rhs = node,
});
return Tag.bit_cast.create(gpa, .{
.lhs = try Tag.type.create(gpa, "usize"),
.rhs = intcast_node,
});
}
/// Translate an arithmetic expression with a pointer operand and a signed-integer operand.
/// Zig requires a usize argument for pointer arithmetic, so we intCast to isize and then
/// bitcast to usize; pointer wraparound makes the math work.
/// Zig pointer addition is not commutative (unlike C); the pointer operand needs to be on the left.
/// The + operator in C is not a sequence point, so it should be safe to swap the operands if necessary.
fn transCreatePointerArithmeticSignedOp(
c: *Context,
scope: *Scope,
stmt: *const clang.BinaryOperator,
result_used: ResultUsed,
) TransError!Node {
const is_add = stmt.getOpcode() == .Add;
const lhs = stmt.getLHS();
const rhs = stmt.getRHS();
const swap_operands = is_add and cIsSignedInteger(getExprQualType(c, lhs));
const swizzled_lhs = if (swap_operands) rhs else lhs;
const swizzled_rhs = if (swap_operands) lhs else rhs;
const lhs_node = try transExpr(c, scope, swizzled_lhs, .used);
const rhs_node = try transExpr(c, scope, swizzled_rhs, .used);
const bitcast_node = try usizeCastForWrappingPtrArithmetic(c.arena, rhs_node);
return transCreateNodeInfixOp(
c,
scope,
if (is_add) .add else .sub,
lhs_node,
bitcast_node,
result_used,
);
}
fn transBinaryOperator(
c: *Context,
scope: *Scope,
stmt: *const clang.BinaryOperator,
result_used: ResultUsed,
) TransError!Node {
const op = stmt.getOpcode();
const qt = stmt.getType();
const isPointerDiffExpr = cIsPointerDiffExpr(c, stmt);
switch (op) {
.Assign => return try transCreateNodeAssign(c, scope, result_used, stmt.getLHS(), stmt.getRHS()),
.Comma => {
var block_scope = try Scope.Block.init(c, scope, true);
defer block_scope.deinit();
const lhs = try transExpr(c, &block_scope.base, stmt.getLHS(), .unused);
try block_scope.statements.append(lhs);
const rhs = try transExpr(c, &block_scope.base, stmt.getRHS(), .used);
const break_node = try Tag.break_val.create(c.arena, .{
.label = block_scope.label,
.val = rhs,
});
try block_scope.statements.append(break_node);
const block_node = try block_scope.complete(c);
return maybeSuppressResult(c, scope, result_used, block_node);
},
.Div => {
if (cIsSignedInteger(qt)) {
// signed integer division uses @divTrunc
const lhs = try transExpr(c, scope, stmt.getLHS(), .used);
const rhs = try transExpr(c, scope, stmt.getRHS(), .used);
const div_trunc = try Tag.div_trunc.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
return maybeSuppressResult(c, scope, result_used, div_trunc);
}
},
.Rem => {
if (cIsSignedInteger(qt)) {
// signed integer remainder uses @rem
const lhs = try transExpr(c, scope, stmt.getLHS(), .used);
const rhs = try transExpr(c, scope, stmt.getRHS(), .used);
const rem = try Tag.rem.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
return maybeSuppressResult(c, scope, result_used, rem);
}
},
.Shl => {
return transCreateNodeShiftOp(c, scope, stmt, .shl, result_used);
},
.Shr => {
return transCreateNodeShiftOp(c, scope, stmt, .shr, result_used);
},
.LAnd => {
return transCreateNodeBoolInfixOp(c, scope, stmt, .@"and", result_used);
},
.LOr => {
return transCreateNodeBoolInfixOp(c, scope, stmt, .@"or", result_used);
},
.Add, .Sub => {
// `ptr + idx` and `idx + ptr` -> ptr + @bitCast(usize, @intCast(isize, idx))
// `ptr - idx` -> ptr - @bitCast(usize, @intCast(isize, idx))
if (qualTypeIsPtr(qt) and (cIsSignedInteger(getExprQualType(c, stmt.getLHS())) or
cIsSignedInteger(getExprQualType(c, stmt.getRHS())))) return transCreatePointerArithmeticSignedOp(c, scope, stmt, result_used);
},
else => {},
}
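// C arithmetic on unsigned integers wraps (as does the pointer subtraction
// handled below), so those cases map to Zig's wrapping operators
// (+%, -%, *%); the rest use the plain operators.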
var op_id: Tag = undefined;
switch (op) {
.Add => {
if (cIsUnsignedInteger(qt)) {
op_id = .add_wrap;
} else {
op_id = .add;
}
},
.Sub => {
if (cIsUnsignedInteger(qt) or isPointerDiffExpr) {
op_id = .sub_wrap;
} else {
op_id = .sub;
}
},
.Mul => {
if (cIsUnsignedInteger(qt)) {
op_id = .mul_wrap;
} else {
op_id = .mul;
}
},
.Div => {
// unsigned/float division uses the / operator
op_id = .div;
},
.Rem => {
// unsigned/float remainder uses the % operator
op_id = .mod;
},
.LT => {
op_id = .less_than;
},
.GT => {
op_id = .greater_than;
},
.LE => {
op_id = .less_than_equal;
},
.GE => {
op_id = .greater_than_equal;
},
.EQ => {
op_id = .equal;
},
.NE => {
op_id = .not_equal;
},
.And => {
op_id = .bit_and;
},
.Xor => {
op_id = .bit_xor;
},
.Or => {
op_id = .bit_or;
},
else => unreachable,
}
const lhs_uncasted = try transExpr(c, scope, stmt.getLHS(), .used);
const rhs_uncasted = try transExpr(c, scope, stmt.getRHS(), .used);
const lhs = if (isBoolRes(lhs_uncasted))
try Tag.bool_to_int.create(c.arena, lhs_uncasted)
else if (isPointerDiffExpr)
try Tag.ptr_to_int.create(c.arena, lhs_uncasted)
else
lhs_uncasted;
const rhs = if (isBoolRes(rhs_uncasted))
try Tag.bool_to_int.create(c.arena, rhs_uncasted)
else if (isPointerDiffExpr)
try Tag.ptr_to_int.create(c.arena, rhs_uncasted)
else
rhs_uncasted;
const infixOpNode = try transCreateNodeInfixOp(c, scope, op_id, lhs, rhs, result_used);
if (isPointerDiffExpr) {
// @divExact(@bitCast(<platform-ptrdiff_t>, @ptrToInt(lhs) -% @ptrToInt(rhs)), @sizeOf(<lhs target type>))
const ptrdiff_type = try transQualTypeIntWidthOf(c, qt, true);
// The C standard requires that pointer subtraction operands are of the same type;
// otherwise it is undefined behavior. So we can assume the left and right
// sides have the same QualType and arbitrarily choose the left one.
const lhs_expr = stmt.getLHS();
const lhs_qt = getExprQualType(c, lhs_expr);
const lhs_qt_translated = try transQualType(c, scope, lhs_qt, lhs_expr.getBeginLoc());
const elem_type = lhs_qt_translated.castTag(.c_pointer).?.data.elem_type;
const sizeof = try Tag.sizeof.create(c.arena, elem_type);
const bitcast = try Tag.bit_cast.create(c.arena, .{ .lhs = ptrdiff_type, .rhs = infixOpNode });
return Tag.div_exact.create(c.arena, .{
.lhs = bitcast,
.rhs = sizeof,
});
}
return infixOpNode;
}
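/// Translate the statements of a C compound statement into an existing block
/// scope, skipping declarations (already appended by transDeclStmtOne) and
/// empty statements.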
fn transCompoundStmtInline(
c: *Context,
stmt: *const clang.CompoundStmt,
block: *Scope.Block,
) TransError!void {
var it = stmt.body_begin();
const end_it = stmt.body_end();
while (it != end_it) : (it += 1) {
const result = try transStmt(c, &block.base, it[0], .unused);
switch (result.tag()) {
.declaration, .empty_block => {},
else => try block.statements.append(result),
}
}
}
fn transCompoundStmt(c: *Context, scope: *Scope, stmt: *const clang.CompoundStmt) TransError!Node {
var block_scope = try Scope.Block.init(c, scope, false);
defer block_scope.deinit();
try transCompoundStmtInline(c, stmt, &block_scope);
return try block_scope.complete(c);
}
fn transCStyleCastExprClass(
c: *Context,
scope: *Scope,
stmt: *const clang.CStyleCastExpr,
result_used: ResultUsed,
) TransError!Node {
const sub_expr = stmt.getSubExpr();
const cast_node = (try transCCast(
c,
scope,
stmt.getBeginLoc(),
stmt.getType(),
sub_expr.getType(),
try transExpr(c, scope, sub_expr, .used),
));
return maybeSuppressResult(c, scope, result_used, cast_node);
}
/// Clang reports the alignment in bits; we use bytes.
/// Clang uses 0 for "no alignment specified"; we use null.
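/// e.g. `__attribute__((aligned(4)))` arrives here as 32 and becomes 4.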
fn zigAlignment(bit_alignment: c_uint) ?c_uint {
if (bit_alignment == 0) return null;
return bit_alignment / 8;
}
fn transDeclStmtOne(
c: *Context,
scope: *Scope,
decl: *const clang.Decl,
block_scope: *Scope.Block,
) TransError!void {
switch (decl.getKind()) {
.Var => {
const var_decl = @ptrCast(*const clang.VarDecl, decl);
const decl_init = var_decl.getInit();
const qual_type = var_decl.getTypeSourceInfo_getType();
const name = try c.str(@ptrCast(*const clang.NamedDecl, var_decl).getName_bytes_begin());
const mangled_name = try block_scope.makeMangledName(c, name);
if (var_decl.getStorageClass() == .Extern) {
// This is actually a global variable; translate it into the global
// scope and let references within this block use the mangled name.
return visitVarDecl(c, var_decl, mangled_name);
}
const is_static_local = var_decl.isStaticLocal();
const is_const = qual_type.isConstQualified();
const loc = decl.getLocation();
const type_node = try transQualTypeMaybeInitialized(c, scope, qual_type, decl_init, loc);
var init_node = if (decl_init) |expr|
if (expr.getStmtClass() == .StringLiteralClass)
try transStringLiteralInitializer(c, scope, @ptrCast(*const clang.StringLiteral, expr), type_node)
else
try transExprCoercing(c, scope, expr, .used)
else if (is_static_local)
try Tag.std_mem_zeroes.create(c.arena, type_node)
else
Tag.undefined_literal.init();
if (!qualTypeIsBoolean(qual_type) and isBoolRes(init_node)) {
init_node = try Tag.bool_to_int.create(c.arena, init_node);
} else if (init_node.tag() == .string_literal and qualTypeIsCharStar(qual_type)) {
const dst_type_node = try transQualType(c, scope, qual_type, loc);
init_node = try removeCVQualifiers(c, dst_type_node, init_node);
}
const var_name: []const u8 = if (is_static_local) Scope.Block.StaticInnerName else mangled_name;
var node = try Tag.var_decl.create(c.arena, .{
.is_pub = false,
.is_const = is_const,
.is_extern = false,
.is_export = false,
.is_threadlocal = var_decl.getTLSKind() != .None,
.linksection_string = null,
.alignment = zigAlignment(var_decl.getAlignedAttribute(c.clang_context)),
.name = var_name,
.type = type_node,
.init = init_node,
});
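// Static locals get a single instance by wrapping the declaration in a
// struct, roughly `const name = struct { var static: T = init; };`.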
if (is_static_local) {
node = try Tag.static_local_var.create(c.arena, .{ .name = mangled_name, .init = node });
}
try block_scope.statements.append(node);
try block_scope.discardVariable(c, mangled_name);
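// C's `__attribute__((cleanup(f)))` is translated as `defer _ = f(&var);`.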
const cleanup_attr = var_decl.getCleanupAttribute();
if (cleanup_attr) |fn_decl| {
const cleanup_fn_name = try c.str(@ptrCast(*const clang.NamedDecl, fn_decl).getName_bytes_begin());
const fn_id = try Tag.identifier.create(c.arena, cleanup_fn_name);
const varname = try Tag.identifier.create(c.arena, mangled_name);
const args = try c.arena.alloc(Node, 1);
args[0] = try Tag.address_of.create(c.arena, varname);
const cleanup_call = try Tag.call.create(c.arena, .{ .lhs = fn_id, .args = args });
const discard = try Tag.discard.create(c.arena, .{ .should_skip = false, .value = cleanup_call });
const deferred_cleanup = try Tag.@"defer".create(c.arena, discard);
try block_scope.statements.append(deferred_cleanup);
}
},
.Typedef => {
try transTypeDef(c, scope, @ptrCast(*const clang.TypedefNameDecl, decl));
},
.Record => {
try transRecordDecl(c, scope, @ptrCast(*const clang.RecordDecl, decl));
},
.Enum => {
try transEnumDecl(c, scope, @ptrCast(*const clang.EnumDecl, decl));
},
.Function => {
try visitFnDecl(c, @ptrCast(*const clang.FunctionDecl, decl));
},
else => {
const decl_name = try c.str(decl.getDeclKindName());
try warn(c, &c.global_scope.base, decl.getLocation(), "ignoring {s} declaration", .{decl_name});
},
}
}
fn transDeclStmt(c: *Context, scope: *Scope, stmt: *const clang.DeclStmt) TransError!Node {
const block_scope = try scope.findBlockScope(c);
var it = stmt.decl_begin();
const end_it = stmt.decl_end();
while (it != end_it) : (it += 1) {
try transDeclStmtOne(c, scope, it[0], block_scope);
}
return Tag.declaration.init();
}
fn transDeclRefExpr(
c: *Context,
scope: *Scope,
expr: *const clang.DeclRefExpr,
) TransError!Node {
const value_decl = expr.getDecl();
const name = try c.str(@ptrCast(*const clang.NamedDecl, value_decl).getName_bytes_begin());
const mangled_name = scope.getAlias(name);
var ref_expr = try Tag.identifier.create(c.arena, mangled_name);
if (@ptrCast(*const clang.Decl, value_decl).getKind() == .Var) {
const var_decl = @ptrCast(*const clang.VarDecl, value_decl);
if (var_decl.isStaticLocal()) {
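// Static locals are wrapped in an inner struct, so reference the field on it.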
ref_expr = try Tag.field_access.create(c.arena, .{
.lhs = ref_expr,
.field_name = Scope.Block.StaticInnerName,
});
}
}
scope.skipVariableDiscard(mangled_name);
return ref_expr;
}
fn transImplicitCastExpr(
c: *Context,
scope: *Scope,
expr: *const clang.ImplicitCastExpr,
result_used: ResultUsed,
) TransError!Node {
const sub_expr = expr.getSubExpr();
const dest_type = getExprQualType(c, @ptrCast(*const clang.Expr, expr));
const src_type = getExprQualType(c, sub_expr);
switch (expr.getCastKind()) {
.BitCast, .FloatingCast, .FloatingToIntegral, .IntegralToFloating, .IntegralCast, .PointerToIntegral, .IntegralToPointer => {
const sub_expr_node = try transExpr(c, scope, sub_expr, .used);
const casted = try transCCast(c, scope, expr.getBeginLoc(), dest_type, src_type, sub_expr_node);
return maybeSuppressResult(c, scope, result_used, casted);
},
.LValueToRValue, .NoOp, .FunctionToPointerDecay => {
const sub_expr_node = try transExpr(c, scope, sub_expr, .used);
return maybeSuppressResult(c, scope, result_used, sub_expr_node);
},
.ArrayToPointerDecay => {
const sub_expr_node = try transExpr(c, scope, sub_expr, .used);
if (exprIsNarrowStringLiteral(sub_expr) or exprIsFlexibleArrayRef(c, sub_expr)) {
return maybeSuppressResult(c, scope, result_used, sub_expr_node);
}
const addr = try Tag.address_of.create(c.arena, sub_expr_node);
const casted = try transCPtrCast(c, scope, expr.getBeginLoc(), dest_type, src_type, addr);
return maybeSuppressResult(c, scope, result_used, casted);
},
.NullToPointer => {
return Tag.null_literal.init();
},
.PointerToBoolean => {
// @ptrToInt(val) != 0
const ptr_to_int = try Tag.ptr_to_int.create(c.arena, try transExpr(c, scope, sub_expr, .used));
const ne = try Tag.not_equal.create(c.arena, .{ .lhs = ptr_to_int, .rhs = Tag.zero_literal.init() });
return maybeSuppressResult(c, scope, result_used, ne);
},
.IntegralToBoolean, .FloatingToBoolean => {
const sub_expr_node = try transExpr(c, scope, sub_expr, .used);
// The expression is already a boolean one; return it as-is.
if (isBoolRes(sub_expr_node))
return maybeSuppressResult(c, scope, result_used, sub_expr_node);
// val != 0
const ne = try Tag.not_equal.create(c.arena, .{ .lhs = sub_expr_node, .rhs = Tag.zero_literal.init() });
return maybeSuppressResult(c, scope, result_used, ne);
},
.BuiltinFnToFnPtr => {
return transBuiltinFnExpr(c, scope, sub_expr, result_used);
},
.ToVoid => {
// Should only appear in the rhs and lhs of a ConditionalOperator
return transExpr(c, scope, sub_expr, .unused);
},
else => |kind| return fail(
c,
error.UnsupportedTranslation,
@ptrCast(*const clang.Stmt, expr).getBeginLoc(),
"unsupported CastKind {s}",
.{@tagName(kind)},
),
}
}
fn isBuiltinDefined(name: []const u8) bool {
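// e.g. returns true for "__builtin_abs" (assuming std.zig.c_builtins declares it).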
inline for (meta.declarations(std.zig.c_builtins)) |decl| {
if (std.mem.eql(u8, name, decl.name)) return true;
}
return false;
}
fn transBuiltinFnExpr(c: *Context, scope: *Scope, expr: *const clang.Expr, used: ResultUsed) TransError!Node {
const node = try transExpr(c, scope, expr, used);
if (node.castTag(.identifier)) |ident| {
const name = ident.data;
if (!isBuiltinDefined(name)) return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "TODO implement function '{s}' in std.zig.c_builtins", .{name});
}
return node;
}
fn transBoolExpr(
c: *Context,
scope: *Scope,
expr: *const clang.Expr,
used: ResultUsed,
) TransError!Node {
if (@ptrCast(*const clang.Stmt, expr).getStmtClass() == .IntegerLiteralClass) {
var signum: c_int = undefined;
if (!(@ptrCast(*const clang.IntegerLiteral, expr).getSignum(&signum, c.clang_context))) {
return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "invalid integer literal", .{});
}
const is_zero = signum == 0;
return Node{ .tag_if_small_enough = @enumToInt(([2]Tag{ .true_literal, .false_literal })[@boolToInt(is_zero)]) };
}
var res = try transExpr(c, scope, expr, used);
if (isBoolRes(res)) {
return maybeSuppressResult(c, scope, used, res);
}
const ty = getExprQualType(c, expr).getTypePtr();
const node = try finishBoolExpr(c, scope, expr.getBeginLoc(), ty, res, used);
return maybeSuppressResult(c, scope, used, node);
}
fn exprIsBooleanType(expr: *const clang.Expr) bool {
return qualTypeIsBoolean(expr.getType());
}
fn exprIsNarrowStringLiteral(expr: *const clang.Expr) bool {
switch (expr.getStmtClass()) {
.StringLiteralClass => {
const string_lit = @ptrCast(*const clang.StringLiteral, expr);
return string_lit.getCharByteWidth() == 1;
},
.PredefinedExprClass => return true,
.UnaryOperatorClass => {
const op_expr = @ptrCast(*const clang.UnaryOperator, expr).getSubExpr();
return exprIsNarrowStringLiteral(op_expr);
},
.ParenExprClass => {
const op_expr = @ptrCast(*const clang.ParenExpr, expr).getSubExpr();
return exprIsNarrowStringLiteral(op_expr);
},
.GenericSelectionExprClass => {
const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, expr);
return exprIsNarrowStringLiteral(gen_sel.getResultExpr());
},
else => return false,
}
}
fn exprIsFlexibleArrayRef(c: *Context, expr: *const clang.Expr) bool {
if (expr.getStmtClass() == .MemberExprClass) {
const member_expr = @ptrCast(*const clang.MemberExpr, expr);
const member_decl = member_expr.getMemberDecl();
const decl_kind = @ptrCast(*const clang.Decl, member_decl).getKind();
if (decl_kind == .Field) {
const field_decl = @ptrCast(*const clang.FieldDecl, member_decl);
return isFlexibleArrayFieldDecl(c, field_decl);
}
}
return false;
}
fn isBoolRes(res: Node) bool {
switch (res.tag()) {
.@"or",
.@"and",
.equal,
.not_equal,
.less_than,
.less_than_equal,
.greater_than,
.greater_than_equal,
.not,
.false_literal,
.true_literal,
=> return true,
else => return false,
}
}
fn finishBoolExpr(
c: *Context,
scope: *Scope,
loc: clang.SourceLocation,
ty: *const clang.Type,
node: Node,
used: ResultUsed,
) TransError!Node {
switch (ty.getTypeClass()) {
.Builtin => {
const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
switch (builtin_ty.getKind()) {
.Bool => return node,
.Char_U,
.UChar,
.Char_S,
.SChar,
.UShort,
.UInt,
.ULong,
.ULongLong,
.Short,
.Int,
.Long,
.LongLong,
.UInt128,
.Int128,
.Float,
.Double,
.Float128,
.LongDouble,
.WChar_U,
.Char8,
.Char16,
.Char32,
.WChar_S,
.Float16,
=> {
// node != 0
return Tag.not_equal.create(c.arena, .{ .lhs = node, .rhs = Tag.zero_literal.init() });
},
.NullPtr => {
// node == null
return Tag.equal.create(c.arena, .{ .lhs = node, .rhs = Tag.null_literal.init() });
},
else => {},
}
},
.Pointer => {
// node != null
return Tag.not_equal.create(c.arena, .{ .lhs = node, .rhs = Tag.null_literal.init() });
},
.Typedef => {
const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
const typedef_decl = typedef_ty.getDecl();
const underlying_type = typedef_decl.getUnderlyingType();
return finishBoolExpr(c, scope, loc, underlying_type.getTypePtr(), node, used);
},
.Enum => {
// node != 0
return Tag.not_equal.create(c.arena, .{ .lhs = node, .rhs = Tag.zero_literal.init() });
},
.Elaborated => {
const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty);
const named_type = elaborated_ty.getNamedType();
return finishBoolExpr(c, scope, loc, named_type.getTypePtr(), node, used);
},
else => {},
}
return fail(c, error.UnsupportedType, loc, "unsupported bool expression type", .{});
}
const SuppressCast = enum {
with_as,
no_as,
};
fn transIntegerLiteral(
c: *Context,
scope: *Scope,
expr: *const clang.IntegerLiteral,
result_used: ResultUsed,
suppress_as: SuppressCast,
) TransError!Node {
var eval_result: clang.ExprEvalResult = undefined;
if (!expr.EvaluateAsInt(&eval_result, c.clang_context)) {
const loc = expr.getBeginLoc();
return fail(c, error.UnsupportedTranslation, loc, "invalid integer literal", .{});
}
if (suppress_as == .no_as) {
const int_lit_node = try transCreateNodeAPInt(c, eval_result.Val.getInt());
return maybeSuppressResult(c, scope, result_used, int_lit_node);
}
// Integer literals in C have types, and this can matter for several reasons.
// For example, this is valid C:
// unsigned char y = 256;
// How this gets evaluated: the 256 is an int, which gets truncated to signed char, then bit-cast
// to unsigned char, resulting in 0. In order for this to work, we have to emit this Zig code:
// var y = @bitCast(u8, @truncate(i8, @as(c_int, 256)));
// Ideally in translate-c we could flatten this out to simply:
// var y: u8 = 0;
// But the first step is to be correct, and the next step is to make the output more elegant.
// @as(T, x)
const expr_base = @ptrCast(*const clang.Expr, expr);
const ty_node = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc());
const rhs = try transCreateNodeAPInt(c, eval_result.Val.getInt());
const as = try Tag.as.create(c.arena, .{ .lhs = ty_node, .rhs = rhs });
return maybeSuppressResult(c, scope, result_used, as);
}
fn transReturnStmt(
c: *Context,
scope: *Scope,
expr: *const clang.ReturnStmt,
) TransError!Node {
const val_expr = expr.getRetValue() orelse
return Tag.return_void.init();
var rhs = try transExprCoercing(c, scope, val_expr, .used);
const return_qt = scope.findBlockReturnType(c);
if (isBoolRes(rhs) and !qualTypeIsBoolean(return_qt)) {
rhs = try Tag.bool_to_int.create(c.arena, rhs);
}
return Tag.@"return".create(c.arena, rhs);
}
fn transNarrowStringLiteral(
c: *Context,
scope: *Scope,
stmt: *const clang.StringLiteral,
result_used: ResultUsed,
) TransError!Node {
var len: usize = undefined;
const bytes_ptr = stmt.getString_bytes_begin_size(&len);
const str = try std.fmt.allocPrint(c.arena, "\"{}\"", .{std.zig.fmtEscapes(bytes_ptr[0..len])});
const node = try Tag.string_literal.create(c.arena, str);
return maybeSuppressResult(c, scope, result_used, node);
}
fn transStringLiteral(
c: *Context,
scope: *Scope,
stmt: *const clang.StringLiteral,
result_used: ResultUsed,
) TransError!Node {
const kind = stmt.getKind();
switch (kind) {
.Ascii, .UTF8 => return transNarrowStringLiteral(c, scope, stmt, result_used),
.UTF16, .UTF32, .Wide => {
const str_type = @tagName(stmt.getKind());
const name = try std.fmt.allocPrint(c.arena, "zig.{s}_string_{d}", .{ str_type, c.getMangle() });
const expr_base = @ptrCast(*const clang.Expr, stmt);
const array_type = try transQualTypeInitialized(c, scope, expr_base.getType(), expr_base, expr_base.getBeginLoc());
const lit_array = try transStringLiteralInitializer(c, scope, stmt, array_type);
const decl = try Tag.var_simple.create(c.arena, .{ .name = name, .init = lit_array });
try scope.appendNode(decl);
const node = try Tag.identifier.create(c.arena, name);
return maybeSuppressResult(c, scope, result_used, node);
},
}
}
fn getArrayPayload(array_type: Node) ast.Payload.Array.ArrayTypeInfo {
return (array_type.castTag(.array_type) orelse array_type.castTag(.null_sentinel_array_type).?).data;
}
/// Translate a string literal that is initializing an array. In general narrow string
/// literals become `"<string>".*` or `"<string>"[0..<size>].*` if they need truncation.
/// Wide string literals become an array of integers. Zero-fillers pad out the array to
/// the appropriate length, if necessary.
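/// e.g. (sketch) `char s[4] = "ab";` becomes roughly `"ab"[0..2].*` concatenated with a zero-filler of length 2.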
fn transStringLiteralInitializer(
c: *Context,
scope: *Scope,
stmt: *const clang.StringLiteral,
array_type: Node,
) TransError!Node {
assert(array_type.tag() == .array_type or array_type.tag() == .null_sentinel_array_type);
const is_narrow = stmt.getKind() == .Ascii or stmt.getKind() == .UTF8;
const str_length = stmt.getLength();
const payload = getArrayPayload(array_type);
const array_size = payload.len;
const elem_type = payload.elem_type;
if (array_size == 0) return Tag.empty_array.create(c.arena, elem_type);
const num_inits = math.min(str_length, array_size);
const init_node = if (num_inits > 0) blk: {
if (is_narrow) {
// "string literal".* or string literal"[0..num_inits].*
var str = try transNarrowStringLiteral(c, scope, stmt, .used);
if (str_length != array_size) str = try Tag.string_slice.create(c.arena, .{ .string = str, .end = num_inits });
break :blk try Tag.deref.create(c.arena, str);
} else {
const init_list = try c.arena.alloc(Node, num_inits);
var i: c_uint = 0;
while (i < num_inits) : (i += 1) {
init_list[i] = try transCreateCharLitNode(c, false, stmt.getCodeUnit(i));
}
const init_args = .{ .len = num_inits, .elem_type = elem_type };
const init_array_type = try if (array_type.tag() == .array_type) Tag.array_type.create(c.arena, init_args) else Tag.null_sentinel_array_type.create(c.arena, init_args);
break :blk try Tag.array_init.create(c.arena, .{
.cond = init_array_type,
.cases = init_list,
});
}
} else null;
if (num_inits == array_size) return init_node.?; // init_node is only null if num_inits == 0; but if num_inits == array_size == 0 we've already returned
assert(array_size > str_length); // If array_size <= str_length, `num_inits == array_size` and we've already returned.
const filler_node = try Tag.array_filler.create(c.arena, .{
.type = elem_type,
.filler = Tag.zero_literal.init(),
.count = array_size - str_length,
});
if (init_node) |some| {
return Tag.array_cat.create(c.arena, .{ .lhs = some, .rhs = filler_node });
} else {
return filler_node;
}
}
/// Determine whether `stmt` is a "pointer subtraction expression" - a subtraction where
/// both operands resolve to addresses. The C standard requires that both operands
/// point to elements of the same array object, but we do not verify that here.
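/// e.g. `p - q` where both `p` and `q` have pointer type.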
fn cIsPointerDiffExpr(c: *Context, stmt: *const clang.BinaryOperator) bool {
_ = c;
const lhs = @ptrCast(*const clang.Stmt, stmt.getLHS());
const rhs = @ptrCast(*const clang.Stmt, stmt.getRHS());
return stmt.getOpcode() == .Sub and
qualTypeIsPtr(@ptrCast(*const clang.Expr, lhs).getType()) and
qualTypeIsPtr(@ptrCast(*const clang.Expr, rhs).getType());
}
fn cIsEnum(qt: clang.QualType) bool {
return qt.getCanonicalType().getTypeClass() == .Enum;
}
fn cIsVector(qt: clang.QualType) bool {
return qt.getCanonicalType().getTypeClass() == .Vector;
}
/// Get the underlying int type of an enum. The C compiler chooses a signed int
/// type that is large enough to hold all of the enum's values. It is not required
/// to be the smallest possible type that can hold all the values.
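/// e.g. an enum whose values all fit in `int` typically gets `int` as its underlying type.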
fn cIntTypeForEnum(enum_qt: clang.QualType) clang.QualType {
assert(cIsEnum(enum_qt));
const ty = enum_qt.getCanonicalType().getTypePtr();
const enum_ty = @ptrCast(*const clang.EnumType, ty);
const enum_decl = enum_ty.getDecl();
return enum_decl.getIntegerType();
}
// when modifying this function, make sure to also update std.meta.cast
fn transCCast(
c: *Context,
scope: *Scope,
loc: clang.SourceLocation,
dst_type: clang.QualType,
src_type: clang.QualType,
expr: Node,
) !Node {
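// e.g. (sketch) casting a c_int `x` to `unsigned char` becomes `@bitCast(u8, @truncate(i8, x))`:
// truncate to the same-signed smaller int, then bit-cast to the destination's signed-ness.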
if (qualTypeCanon(dst_type).isVoidType()) return expr;
if (dst_type.eq(src_type)) return expr;
if (qualTypeIsPtr(dst_type) and qualTypeIsPtr(src_type))
return transCPtrCast(c, scope, loc, dst_type, src_type, expr);
if (cIsEnum(dst_type)) return transCCast(c, scope, loc, cIntTypeForEnum(dst_type), src_type, expr);
if (cIsEnum(src_type)) return transCCast(c, scope, loc, dst_type, cIntTypeForEnum(src_type), expr);
const dst_node = try transQualType(c, scope, dst_type, loc);
if (cIsInteger(dst_type) and cIsInteger(src_type)) {
// 1. If src_type is an enum, determine the underlying signed int type (handled above by recursing).
// 2. Extend or truncate without changing signed-ness.
// 3. Bit-cast to the correct signed-ness.
const src_type_is_signed = cIsSignedInteger(src_type);
var src_int_expr = expr;
if (isBoolRes(src_int_expr)) {
src_int_expr = try Tag.bool_to_int.create(c.arena, src_int_expr);
}
switch (cIntTypeCmp(dst_type, src_type)) {
.lt => {
// @truncate(SameSignSmallerInt, src_int_expr)
const ty_node = try transQualTypeIntWidthOf(c, dst_type, src_type_is_signed);
src_int_expr = try Tag.truncate.create(c.arena, .{ .lhs = ty_node, .rhs = src_int_expr });
},
.gt => {
// @as(SameSignBiggerInt, src_int_expr)
const ty_node = try transQualTypeIntWidthOf(c, dst_type, src_type_is_signed);
src_int_expr = try Tag.as.create(c.arena, .{ .lhs = ty_node, .rhs = src_int_expr });
},
.eq => {
// src_int_expr = src_int_expr
},
}
// @bitCast(dest_type, intermediate_value)
return Tag.bit_cast.create(c.arena, .{ .lhs = dst_node, .rhs = src_int_expr });
}
if (cIsVector(src_type) or cIsVector(dst_type)) {
// A C cast where at least one operand is a vector requires both operands to be the same size.
// @bitCast(dest_type, val)
return Tag.bit_cast.create(c.arena, .{ .lhs = dst_node, .rhs = expr });
}
if (cIsInteger(dst_type) and qualTypeIsPtr(src_type)) {
// @intCast(dest_type, @ptrToInt(val))
const ptr_to_int = try Tag.ptr_to_int.create(c.arena, expr);
return Tag.int_cast.create(c.arena, .{ .lhs = dst_node, .rhs = ptr_to_int });
}
if (cIsInteger(src_type) and qualTypeIsPtr(dst_type)) {
// @intToPtr(dest_type, val)
return Tag.int_to_ptr.create(c.arena, .{ .lhs = dst_node, .rhs = expr });
}
if (cIsFloating(src_type) and cIsFloating(dst_type)) {
// @floatCast(dest_type, val)
return Tag.float_cast.create(c.arena, .{ .lhs = dst_node, .rhs = expr });
}
if (cIsFloating(src_type) and !cIsFloating(dst_type)) {
// @floatToInt(dest_type, val)
return Tag.float_to_int.create(c.arena, .{ .lhs = dst_node, .rhs = expr });
}
if (!cIsFloating(src_type) and cIsFloating(dst_type)) {
var rhs = expr;
if (qualTypeIsBoolean(src_type)) rhs = try Tag.bool_to_int.create(c.arena, expr);
// @intToFloat(dest_type, val)
return Tag.int_to_float.create(c.arena, .{ .lhs = dst_node, .rhs = rhs });
}
if (qualTypeIsBoolean(src_type) and !qualTypeIsBoolean(dst_type)) {
// @boolToInt returns either a comptime_int or a u1
// TODO: if dst_type is 1 bit & signed (bitfield) we need @bitCast
// instead of @as
const bool_to_int = try Tag.bool_to_int.create(c.arena, expr);
return Tag.as.create(c.arena, .{ .lhs = dst_node, .rhs = bool_to_int });
}
// @as(dest_type, val)
return Tag.as.create(c.arena, .{ .lhs = dst_node, .rhs = expr });
}
fn transExpr(c: *Context, scope: *Scope, expr: *const clang.Expr, used: ResultUsed) TransError!Node {
return transStmt(c, scope, @ptrCast(*const clang.Stmt, expr), used);
}
/// Same as `transExpr` but with the knowledge that the operand will be type coerced, and therefore
/// an `@as` would be redundant. This is used to prevent redundant `@as` in integer literals.
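/// e.g. `int x = 1;` can become `var x: c_int = 1;` instead of `var x: c_int = @as(c_int, 1);`.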
fn transExprCoercing(c: *Context, scope: *Scope, expr: *const clang.Expr, used: ResultUsed) TransError!Node {
switch (@ptrCast(*const clang.Stmt, expr).getStmtClass()) {
.IntegerLiteralClass => {
return transIntegerLiteral(c, scope, @ptrCast(*const clang.IntegerLiteral, expr), .used, .no_as);
},
.CharacterLiteralClass => {
return transCharLiteral(c, scope, @ptrCast(*const clang.CharacterLiteral, expr), .used, .no_as);
},
.UnaryOperatorClass => {
const un_expr = @ptrCast(*const clang.UnaryOperator, expr);
if (un_expr.getOpcode() == .Extension) {
return transExprCoercing(c, scope, un_expr.getSubExpr(), used);
}
},
.ImplicitCastExprClass => {
const cast_expr = @ptrCast(*const clang.ImplicitCastExpr, expr);
const sub_expr = cast_expr.getSubExpr();
switch (@ptrCast(*const clang.Stmt, sub_expr).getStmtClass()) {
.IntegerLiteralClass, .CharacterLiteralClass => switch (cast_expr.getCastKind()) {
.IntegralToFloating => return transExprCoercing(c, scope, sub_expr, used),
.IntegralCast => {
const dest_type = getExprQualType(c, expr);
if (literalFitsInType(c, sub_expr, dest_type))
return transExprCoercing(c, scope, sub_expr, used);
},
else => {},
},
else => {},
}
},
else => {},
}
return transExpr(c, scope, expr, .used);
}
fn literalFitsInType(c: *Context, expr: *const clang.Expr, qt: clang.QualType) bool {
var width = qualTypeIntBitWidth(c, qt) catch 8;
if (width == 0) width = 8; // Byte is the smallest type.
const is_signed = cIsSignedInteger(qt);
const width_max_int = (@as(u64, 1) << math.lossyCast(u6, width - @boolToInt(is_signed))) - 1;
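// e.g. for u8 this is (1 << 8) - 1 == 255; for i8 it is (1 << 7) - 1 == 127.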
switch (@ptrCast(*const clang.Stmt, expr).getStmtClass()) {
.CharacterLiteralClass => {
const char_lit = @ptrCast(*const clang.CharacterLiteral, expr);
const val = char_lit.getValue();
// If the val is less than the max int then it fits.
return val <= width_max_int;
},
.IntegerLiteralClass => {
const int_lit = @ptrCast(*const clang.IntegerLiteral, expr);
var eval_result: clang.ExprEvalResult = undefined;
if (!int_lit.EvaluateAsInt(&eval_result, c.clang_context)) {
return false;
}
const int = eval_result.Val.getInt();
return int.lessThanEqual(width_max_int);
},
else => unreachable,
}
}
fn transInitListExprRecord(
c: *Context,
scope: *Scope,
loc: clang.SourceLocation,
expr: *const clang.InitListExpr,
ty: *const clang.Type,
) TransError!Node {
var is_union_type = false;
// Unions and Structs are both represented as RecordDecl
const record_ty = ty.getAsRecordType() orelse
blk: {
is_union_type = true;
break :blk ty.getAsUnionType();
} orelse unreachable;
const record_decl = record_ty.getDecl();
const record_def = record_decl.getDefinition() orelse
unreachable;
const ty_node = try transType(c, scope, ty, loc);
const init_count = expr.getNumInits();
var field_inits = std.ArrayList(ast.Payload.ContainerInit.Initializer).init(c.gpa);
defer field_inits.deinit();
var init_i: c_uint = 0;
var it = record_def.field_begin();
const end_it = record_def.field_end();
while (it.neq(end_it)) : (it = it.next()) {
const field_decl = it.deref();
// The initializer for a union type has a single entry only
if (is_union_type and field_decl != expr.getInitializedFieldInUnion()) {
continue;
}
assert(init_i < init_count);
const elem_expr = expr.getInit(init_i);
init_i += 1;
// Generate the field assignment expression:
// .field_name = expr
var raw_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin());
if (field_decl.isAnonymousStructOrUnion()) {
const name = c.decl_table.get(@ptrToInt(field_decl.getCanonicalDecl())).?;
raw_name = try mem.dupe(c.arena, u8, name);
}
var init_expr = try transExpr(c, scope, elem_expr, .used);
const field_qt = field_decl.getType();
if (init_expr.tag() == .string_literal and qualTypeIsCharStar(field_qt)) {
if (scope.id == .root) {
init_expr = try stringLiteralToCharStar(c, init_expr);
} else {
const dst_type_node = try transQualType(c, scope, field_qt, loc);
init_expr = try removeCVQualifiers(c, dst_type_node, init_expr);
}
}
try field_inits.append(.{
.name = raw_name,
.value = init_expr,
});
}
if (ty_node.castTag(.identifier)) |ident_node| {
scope.skipVariableDiscard(ident_node.data);
}
return Tag.container_init.create(c.arena, .{
.lhs = ty_node,
.inits = try c.arena.dupe(ast.Payload.ContainerInit.Initializer, field_inits.items),
});
}
fn transInitListExprArray(
c: *Context,
scope: *Scope,
loc: clang.SourceLocation,
expr: *const clang.InitListExpr,
ty: *const clang.Type,
) TransError!Node {
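// e.g. (sketch) `int a[4] = {1, 2};` becomes roughly `[2]c_int{ 1, 2 }` concatenated with a two-element zero-filler.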
const arr_type = ty.getAsArrayTypeUnsafe();
const child_qt = arr_type.getElementType();
const child_type = try transQualType(c, scope, child_qt, loc);
const init_count = expr.getNumInits();
assert(@ptrCast(*const clang.Type, arr_type).isConstantArrayType());
const const_arr_ty = @ptrCast(*const clang.ConstantArrayType, arr_type);
const size_ap_int = const_arr_ty.getSize();
const all_count = size_ap_int.getLimitedValue(usize);
const leftover_count = all_count - init_count;
if (all_count == 0) {
return Tag.empty_array.create(c.arena, child_type);
}
const init_node = if (init_count != 0) blk: {
const init_list = try c.arena.alloc(Node, init_count);
for (init_list) |*init, i| {
const elem_expr = expr.getInit(@intCast(c_uint, i));
init.* = try transExprCoercing(c, scope, elem_expr, .used);
}
const init_node = try Tag.array_init.create(c.arena, .{
.cond = try Tag.array_type.create(c.arena, .{ .len = init_count, .elem_type = child_type }),
.cases = init_list,
});
if (leftover_count == 0) {
return init_node;
}
break :blk init_node;
} else null;
const filler_val_expr = expr.getArrayFiller();
const filler_node = try Tag.array_filler.create(c.arena, .{
.type = child_type,
.filler = try transExprCoercing(c, scope, filler_val_expr, .used),
.count = leftover_count,
});
if (init_node) |some| {
return Tag.array_cat.create(c.arena, .{ .lhs = some, .rhs = filler_node });
} else {
return filler_node;
}
}
fn transInitListExprVector(
c: *Context,
scope: *Scope,
loc: clang.SourceLocation,
expr: *const clang.InitListExpr,
ty: *const clang.Type,
) TransError!Node {
_ = ty;
const qt = getExprQualType(c, @ptrCast(*const clang.Expr, expr));
const vector_type = try transQualType(c, scope, qt, loc);
const init_count = expr.getNumInits();
if (init_count == 0) {
return Tag.container_init.create(c.arena, .{
.lhs = vector_type,
.inits = try c.arena.alloc(ast.Payload.ContainerInit.Initializer, 0),
});
}
var block_scope = try Scope.Block.init(c, scope, true);
defer block_scope.deinit();
// workaround for https://github.com/ziglang/zig/issues/8322
// we store the initializers in temp variables and use those
// to initialize the vector. Eventually we can just directly
// construct the init_list from casted source members
var i: usize = 0;
while (i < init_count) : (i += 1) {
const mangled_name = try block_scope.makeMangledName(c, "tmp");
const init_expr = expr.getInit(@intCast(c_uint, i));
const tmp_decl_node = try Tag.var_simple.create(c.arena, .{
.name = mangled_name,
.init = try transExpr(c, &block_scope.base, init_expr, .used),
});
try block_scope.statements.append(tmp_decl_node);
}
const init_list = try c.arena.alloc(Node, init_count);
for (init_list) |*init, init_index| {
const tmp_decl = block_scope.statements.items[init_index];
const name = tmp_decl.castTag(.var_simple).?.data.name;
init.* = try Tag.identifier.create(c.arena, name);
}
const array_init = try Tag.array_init.create(c.arena, .{
.cond = vector_type,
.cases = init_list,
});
const break_node = try Tag.break_val.create(c.arena, .{
.label = block_scope.label,
.val = array_init,
});
try block_scope.statements.append(break_node);
return block_scope.complete(c);
}
fn transInitListExpr(
c: *Context,
scope: *Scope,
expr: *const clang.InitListExpr,
used: ResultUsed,
) TransError!Node {
const qt = getExprQualType(c, @ptrCast(*const clang.Expr, expr));
var qual_type = qt.getTypePtr();
const source_loc = @ptrCast(*const clang.Expr, expr).getBeginLoc();
if (qualTypeWasDemotedToOpaque(c, qt)) {
return fail(c, error.UnsupportedTranslation, source_loc, "cannot initialize opaque type", .{});
}
if (qual_type.isRecordType()) {
return maybeSuppressResult(c, scope, used, try transInitListExprRecord(
c,
scope,
source_loc,
expr,
qual_type,
));
} else if (qual_type.isArrayType()) {
return maybeSuppressResult(c, scope, used, try transInitListExprArray(
c,
scope,
source_loc,
expr,
qual_type,
));
} else if (qual_type.isVectorType()) {
return maybeSuppressResult(c, scope, used, try transInitListExprVector(
c,
scope,
source_loc,
expr,
qual_type,
));
} else {
const type_name = c.str(qual_type.getTypeClassName());
return fail(c, error.UnsupportedType, source_loc, "unsupported initlist type: '{s}'", .{type_name});
}
}
fn transZeroInitExpr(
c: *Context,
scope: *Scope,
source_loc: clang.SourceLocation,
ty: *const clang.Type,
) TransError!Node {
switch (ty.getTypeClass()) {
.Builtin => {
const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
switch (builtin_ty.getKind()) {
.Bool => return Tag.false_literal.init(),
.Char_U,
.UChar,
.Char_S,
.Char8,
.SChar,
.UShort,
.UInt,
.ULong,
.ULongLong,
.Short,
.Int,
.Long,
.LongLong,
.UInt128,
.Int128,
.Float,
.Double,
.Float128,
.Float16,
.LongDouble,
=> return Tag.zero_literal.init(),
else => return fail(c, error.UnsupportedType, source_loc, "unsupported builtin type", .{}),
}
},
.Pointer => return Tag.null_literal.init(),
.Typedef => {
const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
const typedef_decl = typedef_ty.getDecl();
return transZeroInitExpr(
c,
scope,
source_loc,
typedef_decl.getUnderlyingType().getTypePtr(),
);
},
else => return Tag.std_mem_zeroes.create(c.arena, try transType(c, scope, ty, source_loc)),
}
}
fn transImplicitValueInitExpr(
c: *Context,
scope: *Scope,
expr: *const clang.Expr,
used: ResultUsed,
) TransError!Node {
_ = used;
const source_loc = expr.getBeginLoc();
const qt = getExprQualType(c, expr);
const ty = qt.getTypePtr();
return transZeroInitExpr(c, scope, source_loc, ty);
}
/// If a statement can possibly translate to a Zig assignment (either directly because it's
/// an assignment in C or indirectly via result assignment to `_`) AND it's the sole statement
/// in the body of an if statement or loop, then we need to put the statement into its own block.
/// The `else` case here corresponds to statements that could result in an assignment. If a statement
/// class never needs a block, add its enum to the top prong.
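/// e.g. in C `if (c) x = y;` the assignment is the sole statement, so it gets its own block.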
fn maybeBlockify(c: *Context, scope: *Scope, stmt: *const clang.Stmt) TransError!Node {
switch (stmt.getStmtClass()) {
.BreakStmtClass,
.CompoundStmtClass,
.ContinueStmtClass,
.DeclRefExprClass,
.DeclStmtClass,
.DoStmtClass,
.ForStmtClass,
.IfStmtClass,
.ReturnStmtClass,
.NullStmtClass,
.WhileStmtClass,
=> return transStmt(c, scope, stmt, .unused),
else => {
var block_scope = try Scope.Block.init(c, scope, false);
defer block_scope.deinit();
const result = try transStmt(c, &block_scope.base, stmt, .unused);
try block_scope.statements.append(result);
return block_scope.complete(c);
},
}
}
fn transIfStmt(
c: *Context,
scope: *Scope,
stmt: *const clang.IfStmt,
) TransError!Node {
// if (c) t
// if (c) t else e
var cond_scope = Scope.Condition{
.base = .{
.parent = scope,
.id = .condition,
},
};
defer cond_scope.deinit();
const cond_expr = @ptrCast(*const clang.Expr, stmt.getCond());
const cond = try transBoolExpr(c, &cond_scope.base, cond_expr, .used);
const then_body = try maybeBlockify(c, scope, stmt.getThen());
const else_body = if (stmt.getElse()) |expr|
try maybeBlockify(c, scope, expr)
else
null;
return Tag.@"if".create(c.arena, .{ .cond = cond, .then = then_body, .@"else" = else_body });
}
fn transWhileLoop(
c: *Context,
scope: *Scope,
stmt: *const clang.WhileStmt,
) TransError!Node {
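// c:   while (cond) body
// zig: while (cond) body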
var cond_scope = Scope.Condition{
.base = .{
.parent = scope,
.id = .condition,
},
};
defer cond_scope.deinit();
const cond_expr = @ptrCast(*const clang.Expr, stmt.getCond());
const cond = try transBoolExpr(c, &cond_scope.base, cond_expr, .used);
var loop_scope = Scope{
.parent = scope,
.id = .loop,
};
const body = try maybeBlockify(c, &loop_scope, stmt.getBody());
return Tag.@"while".create(c.arena, .{ .cond = cond, .body = body, .cont_expr = null });
}
fn transDoWhileLoop(
c: *Context,
scope: *Scope,
stmt: *const clang.DoStmt,
) TransError!Node {
var loop_scope = Scope{
.parent = scope,
.id = .do_loop,
};
// if (!cond) break;
var cond_scope = Scope.Condition{
.base = .{
.parent = scope,
.id = .condition,
},
};
defer cond_scope.deinit();
const cond = try transBoolExpr(c, &cond_scope.base, @ptrCast(*const clang.Expr, stmt.getCond()), .used);
const if_not_break = switch (cond.tag()) {
.false_literal => return transStmt(c, scope, stmt.getBody(), .unused),
.true_literal => {
const body_node = try maybeBlockify(c, scope, stmt.getBody());
return Tag.while_true.create(c.arena, body_node);
},
else => try Tag.if_not_break.create(c.arena, cond),
};
const body_node = if (stmt.getBody().getStmtClass() == .CompoundStmtClass) blk: {
// there's already a block in C, so we'll append our condition to it.
// c: do {
// c: a;
// c: b;
// c: } while(c);
// zig: while (true) {
// zig: a;
// zig: b;
// zig: if (!cond) break;
// zig: }
const node = try transStmt(c, &loop_scope, stmt.getBody(), .unused);
const block = node.castTag(.block).?;
block.data.stmts.len += 1; // This is safe since we reserve one extra space in Scope.Block.complete.
block.data.stmts[block.data.stmts.len - 1] = if_not_break;
break :blk node;
} else blk: {
// the C statement is without a block, so we need to create a block to contain it.
// c: do
// c: a;
// c: while(c);
// zig: while (true) {
// zig: a;
// zig: if (!cond) break;
// zig: }
const statements = try c.arena.alloc(Node, 2);
statements[0] = try transStmt(c, &loop_scope, stmt.getBody(), .unused);
statements[1] = if_not_break;
break :blk try Tag.block.create(c.arena, .{ .label = null, .stmts = statements });
};
return Tag.while_true.create(c.arena, body_node);
}
fn transForLoop(
c: *Context,
scope: *Scope,
stmt: *const clang.ForStmt,
) TransError!Node {
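// c:   for (init; cond; inc) body
// zig: { init; while (cond) : (inc) body }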
var loop_scope = Scope{
.parent = scope,
.id = .loop,
};
var block_scope: ?Scope.Block = null;
defer if (block_scope) |*bs| bs.deinit();
if (stmt.getInit()) |init| {
block_scope = try Scope.Block.init(c, scope, false);
loop_scope.parent = &block_scope.?.base;
const init_node = try transStmt(c, &block_scope.?.base, init, .unused);
if (init_node.tag() != .declaration) try block_scope.?.statements.append(init_node);
}
var cond_scope = Scope.Condition{
.base = .{
.parent = &loop_scope,
.id = .condition,
},
};
defer cond_scope.deinit();
const cond = if (stmt.getCond()) |cond|
try transBoolExpr(c, &cond_scope.base, cond, .used)
else
Tag.true_literal.init();
const cont_expr = if (stmt.getInc()) |incr|
try transExpr(c, &cond_scope.base, incr, .unused)
else
null;
const body = try maybeBlockify(c, &loop_scope, stmt.getBody());
const while_node = try Tag.@"while".create(c.arena, .{ .cond = cond, .body = body, .cont_expr = cont_expr });
if (block_scope) |*bs| {
try bs.statements.append(while_node);
return try bs.complete(c);
} else {
return while_node;
}
}
fn transSwitch(
c: *Context,
scope: *Scope,
stmt: *const clang.SwitchStmt,
) TransError!Node {
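// A C switch is lowered to `while (true) { switch (cond) { ... } break; }` so that
// C `break` statements inside the prongs translate directly to Zig `break`.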
var loop_scope = Scope{
.parent = scope,
.id = .loop,
};
var block_scope = try Scope.Block.init(c, &loop_scope, false);
defer block_scope.deinit();
const base_scope = &block_scope.base;
var cond_scope = Scope.Condition{
.base = .{
.parent = base_scope,
.id = .condition,
},
};
defer cond_scope.deinit();
const switch_expr = try transExpr(c, &cond_scope.base, stmt.getCond(), .used);
var cases = std.ArrayList(Node).init(c.gpa);
defer cases.deinit();
var has_default = false;
const body = stmt.getBody();
assert(body.getStmtClass() == .CompoundStmtClass);
const compound_stmt = @ptrCast(*const clang.CompoundStmt, body);
var it = compound_stmt.body_begin();
const end_it = compound_stmt.body_end();
// Iterate over switch body and collect all cases.
// Fallthrough is handled by duplicating statements.
while (it != end_it) : (it += 1) {
switch (it[0].getStmtClass()) {
.CaseStmtClass => {
var items = std.ArrayList(Node).init(c.gpa);
defer items.deinit();
const sub = try transCaseStmt(c, base_scope, it[0], &items);
const res = try transSwitchProngStmt(c, base_scope, sub, it, end_it);
if (items.items.len == 0) {
has_default = true;
const switch_else = try Tag.switch_else.create(c.arena, res);
try cases.append(switch_else);
} else {
const switch_prong = try Tag.switch_prong.create(c.arena, .{
.cases = try c.arena.dupe(Node, items.items),
.cond = res,
});
try cases.append(switch_prong);
}
},
.DefaultStmtClass => {
has_default = true;
const default_stmt = @ptrCast(*const clang.DefaultStmt, it[0]);
var sub = default_stmt.getSubStmt();
while (true) switch (sub.getStmtClass()) {
.CaseStmtClass => sub = @ptrCast(*const clang.CaseStmt, sub).getSubStmt(),
.DefaultStmtClass => sub = @ptrCast(*const clang.DefaultStmt, sub).getSubStmt(),
else => break,
};
const res = try transSwitchProngStmt(c, base_scope, sub, it, end_it);
const switch_else = try Tag.switch_else.create(c.arena, res);
try cases.append(switch_else);
},
else => {}, // collected in transSwitchProngStmt
}
}
if (!has_default) {
const else_prong = try Tag.switch_else.create(c.arena, Tag.empty_block.init());
try cases.append(else_prong);
}
const switch_node = try Tag.@"switch".create(c.arena, .{
.cond = switch_expr,
.cases = try c.arena.dupe(Node, cases.items),
});
try block_scope.statements.append(switch_node);
try block_scope.statements.append(Tag.@"break".init());
const while_body = try block_scope.complete(c);
return Tag.while_true.create(c.arena, while_body);
}
/// Collects all items for this case, returns the first statement after the labels.
/// If items ends up empty, the prong should be translated as an else.
fn transCaseStmt(c: *Context, scope: *Scope, stmt: *const clang.Stmt, items: *std.ArrayList(Node)) TransError!*const clang.Stmt {
var sub = stmt;
var seen_default = false;
while (true) {
switch (sub.getStmtClass()) {
.DefaultStmtClass => {
seen_default = true;
items.items.len = 0;
const default_stmt = @ptrCast(*const clang.DefaultStmt, sub);
sub = default_stmt.getSubStmt();
},
.CaseStmtClass => {
const case_stmt = @ptrCast(*const clang.CaseStmt, sub);
if (seen_default) {
items.items.len = 0;
sub = case_stmt.getSubStmt();
continue;
}
const expr = if (case_stmt.getRHS()) |rhs| blk: {
const lhs_node = try transExprCoercing(c, scope, case_stmt.getLHS(), .used);
const rhs_node = try transExprCoercing(c, scope, rhs, .used);
break :blk try Tag.ellipsis3.create(c.arena, .{ .lhs = lhs_node, .rhs = rhs_node });
} else try transExprCoercing(c, scope, case_stmt.getLHS(), .used);
try items.append(expr);
sub = case_stmt.getSubStmt();
},
else => return sub,
}
}
}
/// Collects all statements seen by this case into a block.
/// Avoids creating a block if the first statement is a break or return.
fn transSwitchProngStmt(
c: *Context,
scope: *Scope,
stmt: *const clang.Stmt,
parent_it: clang.CompoundStmt.ConstBodyIterator,
parent_end_it: clang.CompoundStmt.ConstBodyIterator,
) TransError!Node {
switch (stmt.getStmtClass()) {
.BreakStmtClass => return Tag.@"break".init(),
.ReturnStmtClass => return transStmt(c, scope, stmt, .unused),
.CaseStmtClass, .DefaultStmtClass => unreachable,
else => {
var block_scope = try Scope.Block.init(c, scope, false);
defer block_scope.deinit();
// we do not need to translate `stmt` since it is the first stmt of `parent_it`
try transSwitchProngStmtInline(c, &block_scope, parent_it, parent_end_it);
return try block_scope.complete(c);
},
}
}
/// Collects all statements seen by this case into a block.
fn transSwitchProngStmtInline(
c: *Context,
block: *Scope.Block,
start_it: clang.CompoundStmt.ConstBodyIterator,
end_it: clang.CompoundStmt.ConstBodyIterator,
) TransError!void {
var it = start_it;
while (it != end_it) : (it += 1) {
switch (it[0].getStmtClass()) {
.ReturnStmtClass => {
const result = try transStmt(c, &block.base, it[0], .unused);
try block.statements.append(result);
return;
},
.BreakStmtClass => {
try block.statements.append(Tag.@"break".init());
return;
},
.CaseStmtClass => {
var sub = @ptrCast(*const clang.CaseStmt, it[0]).getSubStmt();
while (true) switch (sub.getStmtClass()) {
.CaseStmtClass => sub = @ptrCast(*const clang.CaseStmt, sub).getSubStmt(),
.DefaultStmtClass => sub = @ptrCast(*const clang.DefaultStmt, sub).getSubStmt(),
else => break,
};
const result = try transStmt(c, &block.base, sub, .unused);
assert(result.tag() != .declaration);
try block.statements.append(result);
if (result.isNoreturn(true)) {
return;
}
},
.DefaultStmtClass => {
var sub = @ptrCast(*const clang.DefaultStmt, it[0]).getSubStmt();
while (true) switch (sub.getStmtClass()) {
.CaseStmtClass => sub = @ptrCast(*const clang.CaseStmt, sub).getSubStmt(),
.DefaultStmtClass => sub = @ptrCast(*const clang.DefaultStmt, sub).getSubStmt(),
else => break,
};
const result = try transStmt(c, &block.base, sub, .unused);
assert(result.tag() != .declaration);
try block.statements.append(result);
if (result.isNoreturn(true)) {
return;
}
},
.CompoundStmtClass => {
const result = try transCompoundStmt(c, &block.base, @ptrCast(*const clang.CompoundStmt, it[0]));
try block.statements.append(result);
if (result.isNoreturn(true)) {
return;
}
},
else => {
const result = try transStmt(c, &block.base, it[0], .unused);
switch (result.tag()) {
.declaration, .empty_block => {},
else => try block.statements.append(result),
}
},
}
}
return;
}
fn transConstantExpr(c: *Context, scope: *Scope, expr: *const clang.Expr, used: ResultUsed) TransError!Node {
var result: clang.ExprEvalResult = undefined;
if (!expr.evaluateAsConstantExpr(&result, .Normal, c.clang_context))
return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "invalid constant expression", .{});
switch (result.Val.getKind()) {
.Int => {
// See comment in `transIntegerLiteral` for why this code is here.
// @as(T, x)
const expr_base = @ptrCast(*const clang.Expr, expr);
const as_node = try Tag.as.create(c.arena, .{
.lhs = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()),
.rhs = try transCreateNodeAPInt(c, result.Val.getInt()),
});
return maybeSuppressResult(c, scope, used, as_node);
},
else => |kind| {
return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "unsupported constant expression kind '{s}'", .{kind});
},
}
}
fn transPredefinedExpr(c: *Context, scope: *Scope, expr: *const clang.PredefinedExpr, used: ResultUsed) TransError!Node {
return transStringLiteral(c, scope, expr.getFunctionName(), used);
}
fn transCreateCharLitNode(c: *Context, narrow: bool, val: u32) TransError!Node {
return Tag.char_literal.create(c.arena, if (narrow)
try std.fmt.allocPrint(c.arena, "'{'}'", .{std.zig.fmtEscapes(&.{@intCast(u8, val)})})
else
try std.fmt.allocPrint(c.arena, "'\\u{{{x}}}'", .{val}));
}
fn transCharLiteral(
c: *Context,
scope: *Scope,
stmt: *const clang.CharacterLiteral,
result_used: ResultUsed,
suppress_as: SuppressCast,
) TransError!Node {
const kind = stmt.getKind();
const val = stmt.getValue();
const narrow = kind == .Ascii or kind == .UTF8;
// C has a somewhat obscure feature called multi-character character constants,
// e.g. 'abcd'
const int_lit_node = if (kind == .Ascii and val > 255)
try transCreateNodeNumber(c, val, .int)
else
try transCreateCharLitNode(c, narrow, val);
if (suppress_as == .no_as) {
return maybeSuppressResult(c, scope, result_used, int_lit_node);
}
// See comment in `transIntegerLiteral` for why this code is here.
// @as(T, x)
const expr_base = @ptrCast(*const clang.Expr, stmt);
const as_node = try Tag.as.create(c.arena, .{
.lhs = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()),
.rhs = int_lit_node,
});
return maybeSuppressResult(c, scope, result_used, as_node);
}
fn transStmtExpr(c: *Context, scope: *Scope, stmt: *const clang.StmtExpr, used: ResultUsed) TransError!Node {
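// GNU statement expression, e.g. `({ a; b; c; })` becomes `blk: { a; b; break :blk c; }`.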
const comp = stmt.getSubStmt();
if (used == .unused) {
return transCompoundStmt(c, scope, comp);
}
var block_scope = try Scope.Block.init(c, scope, true);
defer block_scope.deinit();
var it = comp.body_begin();
const end_it = comp.body_end();
while (it != end_it - 1) : (it += 1) {
const result = try transStmt(c, &block_scope.base, it[0], .unused);
switch (result.tag()) {
.declaration, .empty_block => {},
else => try block_scope.statements.append(result),
}
}
const break_node = try Tag.break_val.create(c.arena, .{
.label = block_scope.label,
.val = try transStmt(c, &block_scope.base, it[0], .used),
});
try block_scope.statements.append(break_node);
const res = try block_scope.complete(c);
return maybeSuppressResult(c, scope, used, res);
}
fn transMemberExpr(c: *Context, scope: *Scope, stmt: *const clang.MemberExpr, result_used: ResultUsed) TransError!Node {
var container_node = try transExpr(c, scope, stmt.getBase(), .used);
if (stmt.isArrow()) {
container_node = try Tag.deref.create(c.arena, container_node);
}
const member_decl = stmt.getMemberDecl();
const name = blk: {
const decl_kind = @ptrCast(*const clang.Decl, member_decl).getKind();
// If we're referring to an anonymous struct/enum, find the bogus name
// we've assigned to it during the RecordDecl translation.
if (decl_kind == .Field) {
const field_decl = @ptrCast(*const clang.FieldDecl, member_decl);
if (field_decl.isAnonymousStructOrUnion()) {
const name = c.decl_table.get(@ptrToInt(field_decl.getCanonicalDecl())).?;
break :blk try mem.dupe(c.arena, u8, name);
}
}
const decl = @ptrCast(*const clang.NamedDecl, member_decl);
break :blk try c.str(decl.getName_bytes_begin());
};
var node = try Tag.field_access.create(c.arena, .{ .lhs = container_node, .field_name = name });
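// Flexible array members are translated as helper functions, so the access becomes a call.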
if (exprIsFlexibleArrayRef(c, @ptrCast(*const clang.Expr, stmt))) {
node = try Tag.call.create(c.arena, .{ .lhs = node, .args = &.{} });
}
return maybeSuppressResult(c, scope, result_used, node);
}
/// ptr[subscr] (`subscr` is a signed integer expression, `ptr` a pointer) becomes:
/// (blk: {
/// const tmp = subscr;
/// if (tmp >= 0) break :blk ptr + @intCast(usize, tmp) else break :blk ptr - ~@bitCast(usize, @intCast(isize, tmp) +% -1);
/// }).*
/// Todo: rip this out once `[*]T + isize` becomes valid.
fn transSignedArrayAccess(
c: *Context,
scope: *Scope,
container_expr: *const clang.Expr,
subscr_expr: *const clang.Expr,
result_used: ResultUsed,
) TransError!Node {
var block_scope = try Scope.Block.init(c, scope, true);
defer block_scope.deinit();
const tmp = try block_scope.makeMangledName(c, "tmp");
const subscr_node = try transExpr(c, &block_scope.base, subscr_expr, .used);
const subscr_decl = try Tag.var_simple.create(c.arena, .{ .name = tmp, .init = subscr_node });
try block_scope.statements.append(subscr_decl);
const tmp_ref = try Tag.identifier.create(c.arena, tmp);
const container_node = try transExpr(c, &block_scope.base, container_expr, .used);
const cond_node = try Tag.greater_than_equal.create(c.arena, .{ .lhs = tmp_ref, .rhs = Tag.zero_literal.init() });
const then_value = try Tag.add.create(c.arena, .{
.lhs = container_node,
.rhs = try Tag.int_cast.create(c.arena, .{
.lhs = try Tag.type.create(c.arena, "usize"),
.rhs = tmp_ref,
}),
});
const then_body = try Tag.break_val.create(c.arena, .{
.label = block_scope.label,
.val = then_value,
});
const minuend = container_node;
const signed_size = try Tag.int_cast.create(c.arena, .{
.lhs = try Tag.type.create(c.arena, "isize"),
.rhs = tmp_ref,
});
const to_cast = try Tag.add_wrap.create(c.arena, .{
.lhs = signed_size,
.rhs = try Tag.negate.create(c.arena, Tag.one_literal.init()),
});
const bitcast_node = try Tag.bit_cast.create(c.arena, .{
.lhs = try Tag.type.create(c.arena, "usize"),
.rhs = to_cast,
});
const subtrahend = try Tag.bit_not.create(c.arena, bitcast_node);
const difference = try Tag.sub.create(c.arena, .{
.lhs = minuend,
.rhs = subtrahend,
});
const else_body = try Tag.break_val.create(c.arena, .{
.label = block_scope.label,
.val = difference,
});
const if_node = try Tag.@"if".create(c.arena, .{
.cond = cond_node,
.then = then_body,
.@"else" = else_body,
});
try block_scope.statements.append(if_node);
const block_node = try block_scope.complete(c);
const derefed = try Tag.deref.create(c.arena, block_node);
return maybeSuppressResult(c, &block_scope.base, result_used, derefed);
}
fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscriptExpr, result_used: ResultUsed) TransError!Node {
const base_stmt = stmt.getBase();
const base_qt = getExprQualType(c, base_stmt);
const is_vector = cIsVector(base_qt);
const subscr_expr = stmt.getIdx();
const subscr_qt = getExprQualType(c, subscr_expr);
const is_longlong = cIsLongLongInteger(subscr_qt);
const is_signed = cIsSignedInteger(subscr_qt);
const is_nonnegative_int_literal = cIsNonNegativeIntLiteral(c, subscr_expr);
// Unwrap the base statement if it's an array decayed to a bare pointer type
// so that we index the array itself
var unwrapped_base = base_stmt;
if (@ptrCast(*const clang.Stmt, base_stmt).getStmtClass() == .ImplicitCastExprClass) {
const implicit_cast = @ptrCast(*const clang.ImplicitCastExpr, base_stmt);
if (implicit_cast.getCastKind() == .ArrayToPointerDecay) {
unwrapped_base = implicit_cast.getSubExpr();
}
}
// Special case: actual pointer (not decayed array) and signed integer subscript
// See discussion at https://github.com/ziglang/zig/pull/8589
if (is_signed and (base_stmt == unwrapped_base) and !is_vector and !is_nonnegative_int_literal) return transSignedArrayAccess(c, scope, base_stmt, subscr_expr, result_used);
const container_node = try transExpr(c, scope, unwrapped_base, .used);
const rhs = if (is_longlong or is_signed) blk: {
// check if long long first so that signed long long doesn't just become unsigned long long
const typeid_node = if (is_longlong) try Tag.type.create(c.arena, "usize") else try transQualTypeIntWidthOf(c, subscr_qt, false);
break :blk try Tag.int_cast.create(c.arena, .{ .lhs = typeid_node, .rhs = try transExpr(c, scope, subscr_expr, .used) });
} else try transExpr(c, scope, subscr_expr, .used);
const node = try Tag.array_access.create(c.arena, .{
.lhs = container_node,
.rhs = rhs,
});
return maybeSuppressResult(c, scope, result_used, node);
}
/// Check if an expression is ultimately a reference to a function declaration
/// (which means it should not be unwrapped with `.?` in translated code)
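/// e.g. `f`, `&f`, and `(f)` all ultimately reference the function declaration `f` directly.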
fn cIsFunctionDeclRef(expr: *const clang.Expr) bool {
switch (expr.getStmtClass()) {
.ParenExprClass => {
const op_expr = @ptrCast(*const clang.ParenExpr, expr).getSubExpr();
return cIsFunctionDeclRef(op_expr);
},
.DeclRefExprClass => {
const decl_ref = @ptrCast(*const clang.DeclRefExpr, expr);
const value_decl = decl_ref.getDecl();
const qt = value_decl.getType();
return qualTypeChildIsFnProto(qt);
},
.ImplicitCastExprClass => {
const implicit_cast = @ptrCast(*const clang.ImplicitCastExpr, expr);
const cast_kind = implicit_cast.getCastKind();
if (cast_kind == .BuiltinFnToFnPtr) return true;
if (cast_kind == .FunctionToPointerDecay) {
return cIsFunctionDeclRef(implicit_cast.getSubExpr());
}
return false;
},
.UnaryOperatorClass => {
const un_op = @ptrCast(*const clang.UnaryOperator, expr);
const opcode = un_op.getOpcode();
return (opcode == .AddrOf or opcode == .Deref) and cIsFunctionDeclRef(un_op.getSubExpr());
},
.GenericSelectionExprClass => {
const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, expr);
return cIsFunctionDeclRef(gen_sel.getResultExpr());
},
else => return false,
}
}
fn transCallExpr(c: *Context, scope: *Scope, stmt: *const clang.CallExpr, result_used: ResultUsed) TransError!Node {
const callee = stmt.getCallee();
var raw_fn_expr = try transExpr(c, scope, callee, .used);
var is_ptr = false;
const fn_ty = qualTypeGetFnProto(callee.getType(), &is_ptr);
const fn_expr = if (is_ptr and fn_ty != null and !cIsFunctionDeclRef(callee))
try Tag.unwrap.create(c.arena, raw_fn_expr)
else
raw_fn_expr;
const num_args = stmt.getNumArgs();
const args = try c.arena.alloc(Node, num_args);
const c_args = stmt.getArgs();
var i: usize = 0;
while (i < num_args) : (i += 1) {
var arg = try transExpr(c, scope, c_args[i], .used);
// In C the result type of a boolean expression is int. If this result is passed as
// an argument to a function whose parameter is also int, there is no cast. Therefore
// in Zig we'll need to cast it from bool to u1 (which will safely coerce to c_int).
if (fn_ty) |ty| {
switch (ty) {
.Proto => |fn_proto| {
const param_count = fn_proto.getNumParams();
if (i < param_count) {
const param_qt = fn_proto.getParamType(@intCast(c_uint, i));
if (isBoolRes(arg) and cIsNativeInt(param_qt)) {
arg = try Tag.bool_to_int.create(c.arena, arg);
} else if (arg.tag() == .string_literal and qualTypeIsCharStar(param_qt)) {
const loc = @ptrCast(*const clang.Stmt, stmt).getBeginLoc();
const dst_type_node = try transQualType(c, scope, param_qt, loc);
arg = try removeCVQualifiers(c, dst_type_node, arg);
}
}
},
else => {},
}
}
args[i] = arg;
}
const node = try Tag.call.create(c.arena, .{ .lhs = fn_expr, .args = args });
if (fn_ty) |ty| {
const canon = ty.getReturnType().getCanonicalType();
const ret_ty = canon.getTypePtr();
if (ret_ty.isVoidType()) {
return node;
}
}
return maybeSuppressResult(c, scope, result_used, node);
}
const ClangFunctionType = union(enum) {
Proto: *const clang.FunctionProtoType,
NoProto: *const clang.FunctionType,
fn getReturnType(self: @This()) clang.QualType {
switch (@as(meta.Tag(@This()), self)) {
.Proto => return self.Proto.getReturnType(),
.NoProto => return self.NoProto.getReturnType(),
}
}
};
fn qualTypeGetFnProto(qt: clang.QualType, is_ptr: *bool) ?ClangFunctionType {
const canon = qt.getCanonicalType();
var ty = canon.getTypePtr();
is_ptr.* = false;
if (ty.getTypeClass() == .Pointer) {
is_ptr.* = true;
const child_qt = ty.getPointeeType();
ty = child_qt.getTypePtr();
}
if (ty.getTypeClass() == .FunctionProto) {
return ClangFunctionType{ .Proto = @ptrCast(*const clang.FunctionProtoType, ty) };
}
if (ty.getTypeClass() == .FunctionNoProto) {
return ClangFunctionType{ .NoProto = @ptrCast(*const clang.FunctionType, ty) };
}
return null;
}
fn transUnaryExprOrTypeTraitExpr(
c: *Context,
scope: *Scope,
stmt: *const clang.UnaryExprOrTypeTraitExpr,
result_used: ResultUsed,
) TransError!Node {
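// c:   sizeof(T) and _Alignof(T)
// zig: @sizeOf(T) and @alignOf(T)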
_ = result_used;
const loc = stmt.getBeginLoc();
const type_node = try transQualType(c, scope, stmt.getTypeOfArgument(), loc);
const kind = stmt.getKind();
switch (kind) {
.SizeOf => return Tag.sizeof.create(c.arena, type_node),
.AlignOf => return Tag.alignof.create(c.arena, type_node),
.PreferredAlignOf,
.VecStep,
.OpenMPRequiredSimdAlign,
=> return fail(
c,
error.UnsupportedTranslation,
loc,
"unsupported type trait kind {}",
.{kind},
),
}
}
fn qualTypeHasWrappingOverflow(qt: clang.QualType) bool {
if (cIsUnsignedInteger(qt)) {
// unsigned integer overflow wraps around.
return true;
} else {
// float, signed integer, and pointer overflow is undefined behavior.
return false;
}
}
fn transUnaryOperator(c: *Context, scope: *Scope, stmt: *const clang.UnaryOperator, used: ResultUsed) TransError!Node {
const op_expr = stmt.getSubExpr();
switch (stmt.getOpcode()) {
.PostInc => if (qualTypeHasWrappingOverflow(stmt.getType()))
return transCreatePostCrement(c, scope, stmt, .add_wrap_assign, used)
else
return transCreatePostCrement(c, scope, stmt, .add_assign, used),
.PostDec => if (qualTypeHasWrappingOverflow(stmt.getType()))
return transCreatePostCrement(c, scope, stmt, .sub_wrap_assign, used)
else
return transCreatePostCrement(c, scope, stmt, .sub_assign, used),
.PreInc => if (qualTypeHasWrappingOverflow(stmt.getType()))
return transCreatePreCrement(c, scope, stmt, .add_wrap_assign, used)
else
return transCreatePreCrement(c, scope, stmt, .add_assign, used),
.PreDec => if (qualTypeHasWrappingOverflow(stmt.getType()))
return transCreatePreCrement(c, scope, stmt, .sub_wrap_assign, used)
else
return transCreatePreCrement(c, scope, stmt, .sub_assign, used),
.AddrOf => {
if (cIsFunctionDeclRef(op_expr)) {
return transExpr(c, scope, op_expr, used);
}
return Tag.address_of.create(c.arena, try transExpr(c, scope, op_expr, used));
},
.Deref => {
if (qualTypeWasDemotedToOpaque(c, stmt.getType()))
return fail(c, error.UnsupportedTranslation, stmt.getBeginLoc(), "cannot dereference opaque type", .{});
const node = try transExpr(c, scope, op_expr, used);
var is_ptr = false;
const fn_ty = qualTypeGetFnProto(op_expr.getType(), &is_ptr);
if (fn_ty != null and is_ptr)
return node;
return Tag.deref.create(c.arena, node);
},
.Plus => return transExpr(c, scope, op_expr, used),
.Minus => {
if (!qualTypeHasWrappingOverflow(op_expr.getType())) {
return Tag.negate.create(c.arena, try transExpr(c, scope, op_expr, .used));
} else if (cIsUnsignedInteger(op_expr.getType())) {
// use -% x for unsigned integers
return Tag.negate_wrap.create(c.arena, try transExpr(c, scope, op_expr, .used));
} else return fail(c, error.UnsupportedTranslation, stmt.getBeginLoc(), "C negation with non float non integer", .{});
},
.Not => {
return Tag.bit_not.create(c.arena, try transExpr(c, scope, op_expr, .used));
},
.LNot => {
return Tag.not.create(c.arena, try transBoolExpr(c, scope, op_expr, .used));
},
.Extension => {
return transExpr(c, scope, stmt.getSubExpr(), used);
},
else => return fail(c, error.UnsupportedTranslation, stmt.getBeginLoc(), "unsupported C translation {}", .{stmt.getOpcode()}),
}
}
fn transCreatePreCrement(
c: *Context,
scope: *Scope,
stmt: *const clang.UnaryOperator,
op: Tag,
used: ResultUsed,
) TransError!Node {
const op_expr = stmt.getSubExpr();
if (used == .unused) {
// common case
// c: ++expr
// zig: expr += 1
const lhs = try transExpr(c, scope, op_expr, .used);
const rhs = Tag.one_literal.init();
return transCreateNodeInfixOp(c, scope, op, lhs, rhs, .used);
}
// worst case
// c: ++expr
// zig: (blk: {
// zig: const _ref = &expr;
// zig: _ref.* += 1;
// zig: break :blk _ref.*
// zig: })
var block_scope = try Scope.Block.init(c, scope, true);
defer block_scope.deinit();
const ref = try block_scope.makeMangledName(c, "ref");
const expr = try transExpr(c, &block_scope.base, op_expr, .used);
const addr_of = try Tag.address_of.create(c.arena, expr);
const ref_decl = try Tag.var_simple.create(c.arena, .{ .name = ref, .init = addr_of });
try block_scope.statements.append(ref_decl);
const lhs_node = try Tag.identifier.create(c.arena, ref);
const ref_node = try Tag.deref.create(c.arena, lhs_node);
const node = try transCreateNodeInfixOp(c, &block_scope.base, op, ref_node, Tag.one_literal.init(), .used);
try block_scope.statements.append(node);
const break_node = try Tag.break_val.create(c.arena, .{
.label = block_scope.label,
.val = ref_node,
});
try block_scope.statements.append(break_node);
return block_scope.complete(c);
}
fn transCreatePostCrement(
c: *Context,
scope: *Scope,
stmt: *const clang.UnaryOperator,
op: Tag,
used: ResultUsed,
) TransError!Node {
const op_expr = stmt.getSubExpr();
if (used == .unused) {
// common case
// c: expr++
// zig: expr += 1
const lhs = try transExpr(c, scope, op_expr, .used);
const rhs = Tag.one_literal.init();
return transCreateNodeInfixOp(c, scope, op, lhs, rhs, .used);
}
// worst case
// c: expr++
// zig: (blk: {
// zig: const _ref = &expr;
// zig: const _tmp = _ref.*;
// zig: _ref.* += 1;
// zig: break :blk _tmp
// zig: })
var block_scope = try Scope.Block.init(c, scope, true);
defer block_scope.deinit();
const ref = try block_scope.makeMangledName(c, "ref");
const expr = try transExpr(c, &block_scope.base, op_expr, .used);
const addr_of = try Tag.address_of.create(c.arena, expr);
const ref_decl = try Tag.var_simple.create(c.arena, .{ .name = ref, .init = addr_of });
try block_scope.statements.append(ref_decl);
const lhs_node = try Tag.identifier.create(c.arena, ref);
const ref_node = try Tag.deref.create(c.arena, lhs_node);
const tmp = try block_scope.makeMangledName(c, "tmp");
const tmp_decl = try Tag.var_simple.create(c.arena, .{ .name = tmp, .init = ref_node });
try block_scope.statements.append(tmp_decl);
const node = try transCreateNodeInfixOp(c, &block_scope.base, op, ref_node, Tag.one_literal.init(), .used);
try block_scope.statements.append(node);
const break_node = try Tag.break_val.create(c.arena, .{
.label = block_scope.label,
.val = try Tag.identifier.create(c.arena, tmp),
});
try block_scope.statements.append(break_node);
return block_scope.complete(c);
}
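// Translates C compound assignment, using the wrapping operator variants for
// unsigned operands, e.g. (illustrative):
// c: x += y (unsigned x)
// zig: x +%= y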
fn transCompoundAssignOperator(c: *Context, scope: *Scope, stmt: *const clang.CompoundAssignOperator, used: ResultUsed) TransError!Node {
switch (stmt.getOpcode()) {
.MulAssign => if (qualTypeHasWrappingOverflow(stmt.getType()))
return transCreateCompoundAssign(c, scope, stmt, .mul_wrap_assign, used)
else
return transCreateCompoundAssign(c, scope, stmt, .mul_assign, used),
.AddAssign => if (qualTypeHasWrappingOverflow(stmt.getType()))
return transCreateCompoundAssign(c, scope, stmt, .add_wrap_assign, used)
else
return transCreateCompoundAssign(c, scope, stmt, .add_assign, used),
.SubAssign => if (qualTypeHasWrappingOverflow(stmt.getType()))
return transCreateCompoundAssign(c, scope, stmt, .sub_wrap_assign, used)
else
return transCreateCompoundAssign(c, scope, stmt, .sub_assign, used),
.DivAssign => return transCreateCompoundAssign(c, scope, stmt, .div_assign, used),
.RemAssign => return transCreateCompoundAssign(c, scope, stmt, .mod_assign, used),
.ShlAssign => return transCreateCompoundAssign(c, scope, stmt, .shl_assign, used),
.ShrAssign => return transCreateCompoundAssign(c, scope, stmt, .shr_assign, used),
.AndAssign => return transCreateCompoundAssign(c, scope, stmt, .bit_and_assign, used),
.XorAssign => return transCreateCompoundAssign(c, scope, stmt, .bit_xor_assign, used),
.OrAssign => return transCreateCompoundAssign(c, scope, stmt, .bit_or_assign, used),
else => return fail(
c,
error.UnsupportedTranslation,
stmt.getBeginLoc(),
"unsupported C translation {}",
.{stmt.getOpcode()},
),
}
}
fn transCreateCompoundAssign(
c: *Context,
scope: *Scope,
stmt: *const clang.CompoundAssignOperator,
op: Tag,
used: ResultUsed,
) TransError!Node {
const is_shift = op == .shl_assign or op == .shr_assign;
const is_div = op == .div_assign;
const is_mod = op == .mod_assign;
const lhs = stmt.getLHS();
const rhs = stmt.getRHS();
const loc = stmt.getBeginLoc();
const lhs_qt = getExprQualType(c, lhs);
const rhs_qt = getExprQualType(c, rhs);
const is_signed = cIsSignedInteger(lhs_qt);
const is_ptr_op_signed = qualTypeIsPtr(lhs_qt) and cIsSignedInteger(rhs_qt);
const requires_int_cast = blk: {
const are_integers = cIsInteger(lhs_qt) and cIsInteger(rhs_qt);
const are_same_sign = cIsSignedInteger(lhs_qt) == cIsSignedInteger(rhs_qt);
break :blk are_integers and !(are_same_sign and cIntTypeCmp(lhs_qt, rhs_qt) == .eq);
};
if (used == .unused) {
// common case
// c: lhs += rhs
// zig: lhs += rhs
const lhs_node = try transExpr(c, scope, lhs, .used);
var rhs_node = try transExpr(c, scope, rhs, .used);
if (is_ptr_op_signed) rhs_node = try usizeCastForWrappingPtrArithmetic(c.arena, rhs_node);
if ((is_mod or is_div) and is_signed) {
if (requires_int_cast) rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node);
const operands = .{ .lhs = lhs_node, .rhs = rhs_node };
const builtin = if (is_mod)
try Tag.rem.create(c.arena, operands)
else
try Tag.div_trunc.create(c.arena, operands);
return transCreateNodeInfixOp(c, scope, .assign, lhs_node, builtin, .used);
}
if (is_shift) {
const cast_to_type = try qualTypeToLog2IntRef(c, scope, rhs_qt, loc);
rhs_node = try Tag.int_cast.create(c.arena, .{ .lhs = cast_to_type, .rhs = rhs_node });
} else if (requires_int_cast) {
rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node);
}
return transCreateNodeInfixOp(c, scope, op, lhs_node, rhs_node, .used);
}
// worst case
// c: lhs += rhs
// zig: (blk: {
// zig: const _ref = &lhs;
// zig: _ref.* += rhs;
// zig: break :blk _ref.*
// zig: })
var block_scope = try Scope.Block.init(c, scope, true);
defer block_scope.deinit();
const ref = try block_scope.makeMangledName(c, "ref");
const expr = try transExpr(c, &block_scope.base, lhs, .used);
const addr_of = try Tag.address_of.create(c.arena, expr);
const ref_decl = try Tag.var_simple.create(c.arena, .{ .name = ref, .init = addr_of });
try block_scope.statements.append(ref_decl);
const lhs_node = try Tag.identifier.create(c.arena, ref);
const ref_node = try Tag.deref.create(c.arena, lhs_node);
var rhs_node = try transExpr(c, &block_scope.base, rhs, .used);
if (is_ptr_op_signed) rhs_node = try usizeCastForWrappingPtrArithmetic(c.arena, rhs_node);
if ((is_mod or is_div) and is_signed) {
if (requires_int_cast) rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node);
const operands = .{ .lhs = ref_node, .rhs = rhs_node };
const builtin = if (is_mod)
try Tag.rem.create(c.arena, operands)
else
try Tag.div_trunc.create(c.arena, operands);
const assign = try transCreateNodeInfixOp(c, &block_scope.base, .assign, ref_node, builtin, .used);
try block_scope.statements.append(assign);
} else {
if (is_shift) {
const cast_to_type = try qualTypeToLog2IntRef(c, &block_scope.base, rhs_qt, loc);
rhs_node = try Tag.int_cast.create(c.arena, .{ .lhs = cast_to_type, .rhs = rhs_node });
} else if (requires_int_cast) {
rhs_node = try transCCast(c, &block_scope.base, loc, lhs_qt, rhs_qt, rhs_node);
}
const assign = try transCreateNodeInfixOp(c, &block_scope.base, op, ref_node, rhs_node, .used);
try block_scope.statements.append(assign);
}
const break_node = try Tag.break_val.create(c.arena, .{
.label = block_scope.label,
.val = ref_node,
});
try block_scope.statements.append(break_node);
return block_scope.complete(c);
}
// Casting away const or volatile requires us to use @intToPtr
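// The emitted form is roughly @intToPtr(DestType, @ptrToInt(expr)).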
fn removeCVQualifiers(c: *Context, dst_type_node: Node, expr: Node) Error!Node {
const ptr_to_int = try Tag.ptr_to_int.create(c.arena, expr);
return Tag.int_to_ptr.create(c.arena, .{ .lhs = dst_type_node, .rhs = ptr_to_int });
}
fn transCPtrCast(
c: *Context,
scope: *Scope,
loc: clang.SourceLocation,
dst_type: clang.QualType,
src_type: clang.QualType,
expr: Node,
) !Node {
const ty = dst_type.getTypePtr();
const child_type = ty.getPointeeType();
const src_ty = src_type.getTypePtr();
const src_child_type = src_ty.getPointeeType();
const dst_type_node = try transType(c, scope, ty, loc);
if (!src_ty.isArrayType() and ((src_child_type.isConstQualified() and
!child_type.isConstQualified()) or
(src_child_type.isVolatileQualified() and
!child_type.isVolatileQualified())))
{
return removeCVQualifiers(c, dst_type_node, expr);
} else {
// Implicit downcasting from higher to lower alignment values is forbidden,
// use @alignCast to side-step this problem
const rhs = if (qualTypeCanon(child_type).isVoidType())
// void has 1-byte alignment, so @alignCast is not needed
expr
else if (typeIsOpaque(c, qualTypeCanon(child_type), loc))
// For opaque types a ptrCast is enough
expr
else blk: {
const child_type_node = try transQualType(c, scope, child_type, loc);
const alignof = try Tag.std_meta_alignment.create(c.arena, child_type_node);
const align_cast = try Tag.align_cast.create(c.arena, .{ .lhs = alignof, .rhs = expr });
break :blk align_cast;
};
return Tag.ptr_cast.create(c.arena, .{ .lhs = dst_type_node, .rhs = rhs });
}
}
fn transFloatingLiteral(c: *Context, scope: *Scope, expr: *const clang.FloatingLiteral, used: ResultUsed) TransError!Node {
switch (expr.getRawSemantics()) {
.IEEEhalf, // f16
.IEEEsingle, // f32
.IEEEdouble, // f64
=> {},
else => |format| return fail(
c,
error.UnsupportedTranslation,
expr.getBeginLoc(),
"unsupported floating point constant format {}",
.{format},
),
}
// TODO use something more accurate
var dbl = expr.getValueAsApproximateDouble();
const is_negative = dbl < 0;
if (is_negative) dbl = -dbl;
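// If the value is integral, emit it with a trailing ".0" so the literal is
// still parsed as a float, e.g. 1 becomes "1.0".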
const str = if (dbl == std.math.floor(dbl))
try std.fmt.allocPrint(c.arena, "{d}.0", .{dbl})
else
try std.fmt.allocPrint(c.arena, "{d}", .{dbl});
var node = try Tag.float_literal.create(c.arena, str);
if (is_negative) node = try Tag.negate.create(c.arena, node);
return maybeSuppressResult(c, scope, used, node);
}
fn transBinaryConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.BinaryConditionalOperator, used: ResultUsed) TransError!Node {
// GNU extension of the ternary operator where the middle expression is
// omitted; the condition itself is returned if it evaluates to true.
const qt = @ptrCast(*const clang.Expr, stmt).getType();
const res_is_bool = qualTypeIsBoolean(qt);
const casted_stmt = @ptrCast(*const clang.AbstractConditionalOperator, stmt);
const cond_expr = casted_stmt.getCond();
const false_expr = casted_stmt.getFalseExpr();
// c: (cond_expr)?:(false_expr)
// zig: (blk: {
// const _cond_temp = (cond_expr);
// break :blk if (_cond_temp) _cond_temp else (false_expr);
// })
var block_scope = try Scope.Block.init(c, scope, true);
defer block_scope.deinit();
const mangled_name = try block_scope.makeMangledName(c, "cond_temp");
const init_node = try transExpr(c, &block_scope.base, cond_expr, .used);
const ref_decl = try Tag.var_simple.create(c.arena, .{ .name = mangled_name, .init = init_node });
try block_scope.statements.append(ref_decl);
var cond_scope = Scope.Condition{
.base = .{
.parent = &block_scope.base,
.id = .condition,
},
};
defer cond_scope.deinit();
const cond_ident = try Tag.identifier.create(c.arena, mangled_name);
const ty = getExprQualType(c, cond_expr).getTypePtr();
const cond_node = try finishBoolExpr(c, &cond_scope.base, cond_expr.getBeginLoc(), ty, cond_ident, .used);
var then_body = cond_ident;
if (!res_is_bool and isBoolRes(init_node)) {
then_body = try Tag.bool_to_int.create(c.arena, then_body);
}
var else_body = try transExpr(c, &block_scope.base, false_expr, .used);
if (!res_is_bool and isBoolRes(else_body)) {
else_body = try Tag.bool_to_int.create(c.arena, else_body);
}
const if_node = try Tag.@"if".create(c.arena, .{
.cond = cond_node,
.then = then_body,
.@"else" = else_body,
});
const break_node = try Tag.break_val.create(c.arena, .{
.label = block_scope.label,
.val = if_node,
});
try block_scope.statements.append(break_node);
const res = try block_scope.complete(c);
return maybeSuppressResult(c, scope, used, res);
}
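// Translates the C ternary operator, e.g. (illustrative):
// c: cond ? a : b
// zig: if (cond) a else b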
fn transConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.ConditionalOperator, used: ResultUsed) TransError!Node {
var cond_scope = Scope.Condition{
.base = .{
.parent = scope,
.id = .condition,
},
};
defer cond_scope.deinit();
const qt = @ptrCast(*const clang.Expr, stmt).getType();
const res_is_bool = qualTypeIsBoolean(qt);
const casted_stmt = @ptrCast(*const clang.AbstractConditionalOperator, stmt);
const cond_expr = casted_stmt.getCond();
const true_expr = casted_stmt.getTrueExpr();
const false_expr = casted_stmt.getFalseExpr();
const cond = try transBoolExpr(c, &cond_scope.base, cond_expr, .used);
var then_body = try transExpr(c, scope, true_expr, used);
if (!res_is_bool and isBoolRes(then_body)) {
then_body = try Tag.bool_to_int.create(c.arena, then_body);
}
var else_body = try transExpr(c, scope, false_expr, used);
if (!res_is_bool and isBoolRes(else_body)) {
else_body = try Tag.bool_to_int.create(c.arena, else_body);
}
const if_node = try Tag.@"if".create(c.arena, .{
.cond = cond,
.then = then_body,
.@"else" = else_body,
});
// Clang inserts ImplicitCast(ToVoid)'s to both rhs and lhs so we don't need to suppress the result here.
return if_node;
}
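// If the result is unused, wrap it in a discard (emitted as `_ = result;`) so
// the value is explicitly ignored, as Zig requires for non-void expression
// statements.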
fn maybeSuppressResult(
c: *Context,
scope: *Scope,
used: ResultUsed,
result: Node,
) TransError!Node {
_ = scope;
if (used == .used) return result;
return Tag.discard.create(c.arena, .{ .should_skip = false, .value = result });
}
fn addTopLevelDecl(c: *Context, name: []const u8, decl_node: Node) !void {
try c.global_scope.sym_table.put(name, decl_node);
try c.global_scope.nodes.append(decl_node);
}
/// Translate a qualtype for a variable with an initializer. This only matters
/// for incomplete arrays, since the initializer determines the size of the array.
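/// e.g. (illustrative): `char s[] = "hi";` is given the array type [2:0]u8.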
fn transQualTypeInitialized(
c: *Context,
scope: *Scope,
qt: clang.QualType,
decl_init: *const clang.Expr,
source_loc: clang.SourceLocation,
) TypeError!Node {
const ty = qt.getTypePtr();
if (ty.getTypeClass() == .IncompleteArray) {
const incomplete_array_ty = @ptrCast(*const clang.IncompleteArrayType, ty);
const elem_ty = try transType(c, scope, incomplete_array_ty.getElementType().getTypePtr(), source_loc);
switch (decl_init.getStmtClass()) {
.StringLiteralClass => {
const string_lit = @ptrCast(*const clang.StringLiteral, decl_init);
const string_lit_size = string_lit.getLength();
const array_size = @intCast(usize, string_lit_size);
// An incomplete array initialized with an empty string is translated as [1]T{0};
// see https://github.com/ziglang/zig/issues/8256
if (array_size == 0) return Tag.array_type.create(c.arena, .{ .len = 1, .elem_type = elem_ty });
return Tag.null_sentinel_array_type.create(c.arena, .{ .len = array_size, .elem_type = elem_ty });
},
.InitListExprClass => {
const init_expr = @ptrCast(*const clang.InitListExpr, decl_init);
const size = init_expr.getNumInits();
return Tag.array_type.create(c.arena, .{ .len = size, .elem_type = elem_ty });
},
else => {},
}
}
return transQualType(c, scope, qt, source_loc);
}
fn transQualType(c: *Context, scope: *Scope, qt: clang.QualType, source_loc: clang.SourceLocation) TypeError!Node {
return transType(c, scope, qt.getTypePtr(), source_loc);
}
/// Produces a Zig AST node by translating a Clang QualType, respecting the width, but modifying the signedness.
/// Asserts the type is an integer.
fn transQualTypeIntWidthOf(c: *Context, ty: clang.QualType, is_signed: bool) TypeError!Node {
return transTypeIntWidthOf(c, qualTypeCanon(ty), is_signed);
}
/// Produces a Zig AST node by translating a Clang Type, respecting the width, but modifying the signedness.
/// Asserts the type is an integer.
fn transTypeIntWidthOf(c: *Context, ty: *const clang.Type, is_signed: bool) TypeError!Node {
assert(ty.getTypeClass() == .Builtin);
const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
return Tag.type.create(c.arena, switch (builtin_ty.getKind()) {
.Char_U, .Char_S, .UChar, .SChar, .Char8 => if (is_signed) "i8" else "u8",
.UShort, .Short => if (is_signed) "c_short" else "c_ushort",
.UInt, .Int => if (is_signed) "c_int" else "c_uint",
.ULong, .Long => if (is_signed) "c_long" else "c_ulong",
.ULongLong, .LongLong => if (is_signed) "c_longlong" else "c_ulonglong",
.UInt128, .Int128 => if (is_signed) "i128" else "u128",
.Char16 => if (is_signed) "i16" else "u16",
.Char32 => if (is_signed) "i32" else "u32",
else => unreachable, // only call this function when it has already been determined the type is int
});
}
fn isCBuiltinType(qt: clang.QualType, kind: clang.BuiltinTypeKind) bool {
const c_type = qualTypeCanon(qt);
if (c_type.getTypeClass() != .Builtin)
return false;
const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
return builtin_ty.getKind() == kind;
}
fn qualTypeIsPtr(qt: clang.QualType) bool {
return qualTypeCanon(qt).getTypeClass() == .Pointer;
}
fn qualTypeIsBoolean(qt: clang.QualType) bool {
return qualTypeCanon(qt).isBooleanType();
}
fn qualTypeIntBitWidth(c: *Context, qt: clang.QualType) !u32 {
const ty = qt.getTypePtr();
switch (ty.getTypeClass()) {
.Builtin => {
const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
switch (builtin_ty.getKind()) {
.Char_U,
.UChar,
.Char_S,
.SChar,
=> return 8,
.UInt128,
.Int128,
=> return 128,
else => return 0,
}
unreachable;
},
.Typedef => {
const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
const typedef_decl = typedef_ty.getDecl();
const type_name = try c.str(@ptrCast(*const clang.NamedDecl, typedef_decl).getName_bytes_begin());
if (mem.eql(u8, type_name, "uint8_t") or mem.eql(u8, type_name, "int8_t")) {
return 8;
} else if (mem.eql(u8, type_name, "uint16_t") or mem.eql(u8, type_name, "int16_t")) {
return 16;
} else if (mem.eql(u8, type_name, "uint32_t") or mem.eql(u8, type_name, "int32_t")) {
return 32;
} else if (mem.eql(u8, type_name, "uint64_t") or mem.eql(u8, type_name, "int64_t")) {
return 64;
} else {
return 0;
}
},
else => return 0,
}
}
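// Returns the Zig type to use for a shift amount: the log2 integer type when
// the bit width is known here (e.g. u3 for an 8-bit operand), otherwise
// std.math.Log2Int of the translated type.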
fn qualTypeToLog2IntRef(c: *Context, scope: *Scope, qt: clang.QualType, source_loc: clang.SourceLocation) !Node {
const int_bit_width = try qualTypeIntBitWidth(c, qt);
if (int_bit_width != 0) {
// we can perform the log2 now.
const cast_bit_width = math.log2_int(u64, int_bit_width);
return Tag.log2_int_type.create(c.arena, cast_bit_width);
}
const zig_type = try transQualType(c, scope, qt, source_loc);
return Tag.std_math_Log2Int.create(c.arena, zig_type);
}
fn qualTypeChildIsFnProto(qt: clang.QualType) bool {
const ty = qualTypeCanon(qt);
switch (ty.getTypeClass()) {
.FunctionProto, .FunctionNoProto => return true,
else => return false,
}
}
fn qualTypeCanon(qt: clang.QualType) *const clang.Type {
const canon = qt.getCanonicalType();
return canon.getTypePtr();
}
fn getExprQualType(c: *Context, expr: *const clang.Expr) clang.QualType {
blk: {
// If this is a C `char *`, turn it into a `const char *`
if (expr.getStmtClass() != .ImplicitCastExprClass) break :blk;
const cast_expr = @ptrCast(*const clang.ImplicitCastExpr, expr);
if (cast_expr.getCastKind() != .ArrayToPointerDecay) break :blk;
const sub_expr = cast_expr.getSubExpr();
if (sub_expr.getStmtClass() != .StringLiteralClass) break :blk;
const array_qt = sub_expr.getType();
const array_type = @ptrCast(*const clang.ArrayType, array_qt.getTypePtr());
var pointee_qt = array_type.getElementType();
pointee_qt.addConst();
return c.clang_context.getPointerType(pointee_qt);
}
return expr.getType();
}
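// A type is treated as opaque when it cannot be represented directly in Zig:
// void, a record with no visible definition, or a record containing bitfields.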
fn typeIsOpaque(c: *Context, ty: *const clang.Type, loc: clang.SourceLocation) bool {
switch (ty.getTypeClass()) {
.Builtin => {
const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
return builtin_ty.getKind() == .Void;
},
.Record => {
const record_ty = @ptrCast(*const clang.RecordType, ty);
const record_decl = record_ty.getDecl();
const record_def = record_decl.getDefinition() orelse
return true;
var it = record_def.field_begin();
const end_it = record_def.field_end();
while (it.neq(end_it)) : (it = it.next()) {
const field_decl = it.deref();
if (field_decl.isBitField()) {
return true;
}
}
return false;
},
.Elaborated => {
const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty);
const qt = elaborated_ty.getNamedType();
return typeIsOpaque(c, qt.getTypePtr(), loc);
},
.Typedef => {
const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
const typedef_decl = typedef_ty.getDecl();
const underlying_type = typedef_decl.getUnderlyingType();
return typeIsOpaque(c, underlying_type.getTypePtr(), loc);
},
else => return false,
}
}
/// plain `char *` (not const; not explicitly signed or unsigned)
fn qualTypeIsCharStar(qt: clang.QualType) bool {
if (qualTypeIsPtr(qt)) {
const child_qt = qualTypeCanon(qt).getPointeeType();
return cIsUnqualifiedChar(child_qt) and !child_qt.isConstQualified();
}
return false;
}
/// C `char` without explicit signed or unsigned qualifier
fn cIsUnqualifiedChar(qt: clang.QualType) bool {
const c_type = qualTypeCanon(qt);
if (c_type.getTypeClass() != .Builtin) return false;
const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
return switch (builtin_ty.getKind()) {
.Char_S, .Char_U => true,
else => false,
};
}
fn cIsInteger(qt: clang.QualType) bool {
return cIsSignedInteger(qt) or cIsUnsignedInteger(qt);
}
fn cIsUnsignedInteger(qt: clang.QualType) bool {
const c_type = qualTypeCanon(qt);
if (c_type.getTypeClass() != .Builtin) return false;
const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
return switch (builtin_ty.getKind()) {
.Char_U,
.UChar,
.Char_S,
.UShort,
.UInt,
.ULong,
.ULongLong,
.UInt128,
.WChar_U,
=> true,
else => false,
};
}
fn cIntTypeToIndex(qt: clang.QualType) u8 {
const c_type = qualTypeCanon(qt);
assert(c_type.getTypeClass() == .Builtin);
const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
return switch (builtin_ty.getKind()) {
.Bool, .Char_U, .Char_S, .UChar, .SChar, .Char8 => 1,
.WChar_U, .WChar_S => 2,
.UShort, .Short, .Char16 => 3,
.UInt, .Int, .Char32 => 4,
.ULong, .Long => 5,
.ULongLong, .LongLong => 6,
.UInt128, .Int128 => 7,
else => unreachable,
};
}
fn cIntTypeCmp(a: clang.QualType, b: clang.QualType) math.Order {
const a_index = cIntTypeToIndex(a);
const b_index = cIntTypeToIndex(b);
return math.order(a_index, b_index);
}
/// Checks if expr is an integer literal >= 0
fn cIsNonNegativeIntLiteral(c: *Context, expr: *const clang.Expr) bool {
if (@ptrCast(*const clang.Stmt, expr).getStmtClass() == .IntegerLiteralClass) {
var signum: c_int = undefined;
if (!(@ptrCast(*const clang.IntegerLiteral, expr).getSignum(&signum, c.clang_context))) {
return false;
}
return signum >= 0;
}
return false;
}
fn cIsSignedInteger(qt: clang.QualType) bool {
const c_type = qualTypeCanon(qt);
if (c_type.getTypeClass() != .Builtin) return false;
const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
return switch (builtin_ty.getKind()) {
.SChar,
.Short,
.Int,
.Long,
.LongLong,
.Int128,
.WChar_S,
=> true,
else => false,
};
}
fn cIsNativeInt(qt: clang.QualType) bool {
const c_type = qualTypeCanon(qt);
if (c_type.getTypeClass() != .Builtin) return false;
const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
return builtin_ty.getKind() == .Int;
}
fn cIsFloating(qt: clang.QualType) bool {
const c_type = qualTypeCanon(qt);
if (c_type.getTypeClass() != .Builtin) return false;
const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
return switch (builtin_ty.getKind()) {
.Float,
.Double,
.Float128,
.LongDouble,
=> true,
else => false,
};
}
fn cIsLongLongInteger(qt: clang.QualType) bool {
const c_type = qualTypeCanon(qt);
if (c_type.getTypeClass() != .Builtin) return false;
const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
return switch (builtin_ty.getKind()) {
.LongLong, .ULongLong, .Int128, .UInt128 => true,
else => false,
};
}
fn transCreateNodeAssign(
c: *Context,
scope: *Scope,
result_used: ResultUsed,
lhs: *const clang.Expr,
rhs: *const clang.Expr,
) !Node {
// common case
// c: lhs = rhs
// zig: lhs = rhs
if (result_used == .unused) {
const lhs_node = try transExpr(c, scope, lhs, .used);
var rhs_node = try transExprCoercing(c, scope, rhs, .used);
if (!exprIsBooleanType(lhs) and isBoolRes(rhs_node)) {
rhs_node = try Tag.bool_to_int.create(c.arena, rhs_node);
}
return transCreateNodeInfixOp(c, scope, .assign, lhs_node, rhs_node, .used);
}
// worst case
// c: lhs = rhs
// zig: (blk: {
// zig: const _tmp = rhs;
// zig: lhs = _tmp;
// zig: break :blk _tmp
// zig: })
var block_scope = try Scope.Block.init(c, scope, true);
defer block_scope.deinit();
const tmp = try block_scope.makeMangledName(c, "tmp");
const rhs_node = try transExpr(c, &block_scope.base, rhs, .used);
const tmp_decl = try Tag.var_simple.create(c.arena, .{ .name = tmp, .init = rhs_node });
try block_scope.statements.append(tmp_decl);
const lhs_node = try transExpr(c, &block_scope.base, lhs, .used);
const tmp_ident = try Tag.identifier.create(c.arena, tmp);
const assign = try transCreateNodeInfixOp(c, &block_scope.base, .assign, lhs_node, tmp_ident, .used);
try block_scope.statements.append(assign);
const break_node = try Tag.break_val.create(c.arena, .{
.label = block_scope.label,
.val = tmp_ident,
});
try block_scope.statements.append(break_node);
return block_scope.complete(c);
}
fn transCreateNodeInfixOp(
c: *Context,
scope: *Scope,
op: Tag,
lhs: Node,
rhs: Node,
used: ResultUsed,
) !Node {
const payload = try c.arena.create(ast.Payload.BinOp);
payload.* = .{
.base = .{ .tag = op },
.data = .{
.lhs = lhs,
.rhs = rhs,
},
};
return maybeSuppressResult(c, scope, used, Node.initPayload(&payload.base));
}
fn transCreateNodeBoolInfixOp(
c: *Context,
scope: *Scope,
stmt: *const clang.BinaryOperator,
op: Tag,
used: ResultUsed,
) !Node {
std.debug.assert(op == .@"and" or op == .@"or");
const lhs = try transBoolExpr(c, scope, stmt.getLHS(), .used);
const rhs = try transBoolExpr(c, scope, stmt.getRHS(), .used);
return transCreateNodeInfixOp(c, scope, op, lhs, rhs, used);
}
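// Converts a Clang arbitrary-precision integer to a Zig integer literal node by
// copying its raw 64-bit words into big-int limbs and formatting them in base 10.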
fn transCreateNodeAPInt(c: *Context, int: *const clang.APSInt) !Node {
const num_limbs = math.cast(usize, int.getNumWords()) catch |err| switch (err) {
error.Overflow => return error.OutOfMemory,
};
var aps_int = int;
const is_negative = int.isSigned() and int.isNegative();
if (is_negative) aps_int = aps_int.negate();
defer if (is_negative) {
aps_int.free();
};
const limbs = try c.arena.alloc(math.big.Limb, num_limbs);
defer c.arena.free(limbs);
const data = aps_int.getRawData();
switch (@sizeOf(math.big.Limb)) {
8 => {
var i: usize = 0;
while (i < num_limbs) : (i += 1) {
limbs[i] = data[i];
}
},
4 => {
var limb_i: usize = 0;
var data_i: usize = 0;
while (limb_i < num_limbs) : ({
limb_i += 2;
data_i += 1;
}) {
limbs[limb_i] = @truncate(u32, data[data_i]);
limbs[limb_i + 1] = @truncate(u32, data[data_i] >> 32);
}
},
else => @compileError("unimplemented"),
}
const big: math.big.int.Const = .{ .limbs = limbs, .positive = true };
const str = big.toStringAlloc(c.arena, 10, .lower) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
};
const res = try Tag.integer_literal.create(c.arena, str);
if (is_negative) return Tag.negate.create(c.arena, res);
return res;
}
fn transCreateNodeNumber(c: *Context, num: anytype, num_kind: enum { int, float }) !Node {
const fmt_s = if (comptime meta.trait.isNumber(@TypeOf(num))) "{d}" else "{s}";
const str = try std.fmt.allocPrint(c.arena, fmt_s, .{num});
if (num_kind == .float)
return Tag.float_literal.create(c.arena, str)
else
return Tag.integer_literal.create(c.arena, str);
}
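// Creates a `pub inline fn` wrapper for a macro that merely aliases an existing
// function, forwarding every parameter to the unwrapped function value.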
fn transCreateNodeMacroFn(c: *Context, name: []const u8, ref: Node, proto_alias: *ast.Payload.Func) !Node {
var fn_params = std.ArrayList(ast.Payload.Param).init(c.gpa);
defer fn_params.deinit();
for (proto_alias.data.params) |param| {
const param_name = param.name orelse
try std.fmt.allocPrint(c.arena, "arg_{d}", .{c.getMangle()});
try fn_params.append(.{
.name = param_name,
.type = param.type,
.is_noalias = param.is_noalias,
});
}
const init = if (ref.castTag(.var_decl)) |v|
v.data.init.?
else if (ref.castTag(.var_simple) orelse ref.castTag(.pub_var_simple)) |v|
v.data.init
else
unreachable;
const unwrap_expr = try Tag.unwrap.create(c.arena, init);
const args = try c.arena.alloc(Node, fn_params.items.len);
for (fn_params.items) |param, i| {
args[i] = try Tag.identifier.create(c.arena, param.name.?);
}
const call_expr = try Tag.call.create(c.arena, .{
.lhs = unwrap_expr,
.args = args,
});
const return_expr = try Tag.@"return".create(c.arena, call_expr);
const block = try Tag.block_single.create(c.arena, return_expr);
return Tag.pub_inline_fn.create(c.arena, .{
.name = name,
.params = try c.arena.dupe(ast.Payload.Param, fn_params.items),
.return_type = proto_alias.data.return_type,
.body = block,
});
}
fn transCreateNodeShiftOp(
c: *Context,
scope: *Scope,
stmt: *const clang.BinaryOperator,
op: Tag,
used: ResultUsed,
) !Node {
std.debug.assert(op == .shl or op == .shr);
const lhs_expr = stmt.getLHS();
const rhs_expr = stmt.getRHS();
const rhs_location = rhs_expr.getBeginLoc();
// e.g. lhs >> @intCast(u5, rhs)
const lhs = try transExpr(c, scope, lhs_expr, .used);
const rhs_type = try qualTypeToLog2IntRef(c, scope, stmt.getType(), rhs_location);
const rhs = try transExprCoercing(c, scope, rhs_expr, .used);
const rhs_casted = try Tag.int_cast.create(c.arena, .{ .lhs = rhs_type, .rhs = rhs });
return transCreateNodeInfixOp(c, scope, op, lhs, rhs_casted, used);
}
fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clang.SourceLocation) TypeError!Node {
switch (ty.getTypeClass()) {
.Builtin => {
const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
return Tag.type.create(c.arena, switch (builtin_ty.getKind()) {
.Void => "c_void",
.Bool => "bool",
.Char_U, .UChar, .Char_S, .Char8 => "u8",
.SChar => "i8",
.UShort => "c_ushort",
.UInt => "c_uint",
.ULong => "c_ulong",
.ULongLong => "c_ulonglong",
.Short => "c_short",
.Int => "c_int",
.Long => "c_long",
.LongLong => "c_longlong",
.UInt128 => "u128",
.Int128 => "i128",
.Float => "f32",
.Double => "f64",
.Float128 => "f128",
.Float16 => "f16",
.LongDouble => "c_longdouble",
else => return fail(c, error.UnsupportedType, source_loc, "unsupported builtin type", .{}),
});
},
.FunctionProto => {
const fn_proto_ty = @ptrCast(*const clang.FunctionProtoType, ty);
const fn_proto = try transFnProto(c, null, fn_proto_ty, source_loc, null, false);
return Node.initPayload(&fn_proto.base);
},
.FunctionNoProto => {
const fn_no_proto_ty = @ptrCast(*const clang.FunctionType, ty);
const fn_proto = try transFnNoProto(c, fn_no_proto_ty, source_loc, null, false);
return Node.initPayload(&fn_proto.base);
},
.Paren => {
const paren_ty = @ptrCast(*const clang.ParenType, ty);
return transQualType(c, scope, paren_ty.getInnerType(), source_loc);
},
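// Pointers to functions are translated as optional function pointers,
// pointers to opaque types as optional single-item pointers, and all
// other pointers as [*c] pointers.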
.Pointer => {
const child_qt = ty.getPointeeType();
if (qualTypeChildIsFnProto(child_qt)) {
return Tag.optional_type.create(c.arena, try transQualType(c, scope, child_qt, source_loc));
}
const is_const = child_qt.isConstQualified();
const is_volatile = child_qt.isVolatileQualified();
const elem_type = try transQualType(c, scope, child_qt, source_loc);
if (typeIsOpaque(c, child_qt.getTypePtr(), source_loc) or qualTypeWasDemotedToOpaque(c, child_qt)) {
const ptr = try Tag.single_pointer.create(c.arena, .{ .is_const = is_const, .is_volatile = is_volatile, .elem_type = elem_type });
return Tag.optional_type.create(c.arena, ptr);
}
return Tag.c_pointer.create(c.arena, .{ .is_const = is_const, .is_volatile = is_volatile, .elem_type = elem_type });
},
.ConstantArray => {
const const_arr_ty = @ptrCast(*const clang.ConstantArrayType, ty);
const size_ap_int = const_arr_ty.getSize();
const size = size_ap_int.getLimitedValue(usize);
const elem_type = try transType(c, scope, const_arr_ty.getElementType().getTypePtr(), source_loc);
return Tag.array_type.create(c.arena, .{ .len = size, .elem_type = elem_type });
},
.IncompleteArray => {
const incomplete_array_ty = @ptrCast(*const clang.IncompleteArrayType, ty);
const child_qt = incomplete_array_ty.getElementType();
const is_const = child_qt.isConstQualified();
const is_volatile = child_qt.isVolatileQualified();
const elem_type = try transQualType(c, scope, child_qt, source_loc);
return Tag.c_pointer.create(c.arena, .{ .is_const = is_const, .is_volatile = is_volatile, .elem_type = elem_type });
},
.Typedef => {
const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
const typedef_decl = typedef_ty.getDecl();
var trans_scope = scope;
if (@ptrCast(*const clang.Decl, typedef_decl).castToNamedDecl()) |named_decl| {
const decl_name = try c.str(named_decl.getName_bytes_begin());
if (c.global_names.get(decl_name)) |_| trans_scope = &c.global_scope.base;
if (builtin_typedef_map.get(decl_name)) |builtin| return Tag.type.create(c.arena, builtin);
}
try transTypeDef(c, trans_scope, typedef_decl);
const name = c.decl_table.get(@ptrToInt(typedef_decl.getCanonicalDecl())).?;
return Tag.identifier.create(c.arena, name);
},
.Record => {
const record_ty = @ptrCast(*const clang.RecordType, ty);
const record_decl = record_ty.getDecl();
var trans_scope = scope;
if (@ptrCast(*const clang.Decl, record_decl).castToNamedDecl()) |named_decl| {
const decl_name = try c.str(named_decl.getName_bytes_begin());
if (c.global_names.get(decl_name)) |_| trans_scope = &c.global_scope.base;
}
try transRecordDecl(c, trans_scope, record_decl);
const name = c.decl_table.get(@ptrToInt(record_decl.getCanonicalDecl())).?;
return Tag.identifier.create(c.arena, name);
},
.Enum => {
const enum_ty = @ptrCast(*const clang.EnumType, ty);
const enum_decl = enum_ty.getDecl();
var trans_scope = scope;
if (@ptrCast(*const clang.Decl, enum_decl).castToNamedDecl()) |named_decl| {
const decl_name = try c.str(named_decl.getName_bytes_begin());
if (c.global_names.get(decl_name)) |_| trans_scope = &c.global_scope.base;
}
try transEnumDecl(c, trans_scope, enum_decl);
const name = c.decl_table.get(@ptrToInt(enum_decl.getCanonicalDecl())).?;
return Tag.identifier.create(c.arena, name);
},
.Elaborated => {
const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty);
return transQualType(c, scope, elaborated_ty.getNamedType(), source_loc);
},
.Decayed => {
const decayed_ty = @ptrCast(*const clang.DecayedType, ty);
return transQualType(c, scope, decayed_ty.getDecayedType(), source_loc);
},
.Attributed => {
const attributed_ty = @ptrCast(*const clang.AttributedType, ty);
return transQualType(c, scope, attributed_ty.getEquivalentType(), source_loc);
},
.MacroQualified => {
const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, ty);
return transQualType(c, scope, macroqualified_ty.getModifiedType(), source_loc);
},
.TypeOf => {
const typeof_ty = @ptrCast(*const clang.TypeOfType, ty);
return transQualType(c, scope, typeof_ty.getUnderlyingType(), source_loc);
},
.TypeOfExpr => {
const typeofexpr_ty = @ptrCast(*const clang.TypeOfExprType, ty);
const underlying_expr = transExpr(c, scope, typeofexpr_ty.getUnderlyingExpr(), .used) catch |err| switch (err) {
error.UnsupportedTranslation => {
return fail(c, error.UnsupportedType, source_loc, "unsupported underlying expression for TypeOfExpr", .{});
},
else => |e| return e,
};
return Tag.typeof.create(c.arena, underlying_expr);
},
.Vector => {
const vector_ty = @ptrCast(*const clang.VectorType, ty);
const num_elements = vector_ty.getNumElements();
const element_qt = vector_ty.getElementType();
return Tag.std_meta_vector.create(c.arena, .{
.lhs = try transCreateNodeNumber(c, num_elements, .int),
.rhs = try transQualType(c, scope, element_qt, source_loc),
});
},
.ExtInt, .ExtVector => {
const type_name = c.str(ty.getTypeClassName());
return fail(c, error.UnsupportedType, source_loc, "TODO implement translation of type: '{s}'", .{type_name});
},
else => {
const type_name = c.str(ty.getTypeClassName());
return fail(c, error.UnsupportedType, source_loc, "unsupported type: '{s}'", .{type_name});
},
}
}
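// Returns true if this type (or a type it wraps) was demoted to an opaque type
// during translation, e.g. a record that contains bitfields.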
fn qualTypeWasDemotedToOpaque(c: *Context, qt: clang.QualType) bool {
const ty = qt.getTypePtr();
switch (ty.getTypeClass()) {
.Typedef => {
const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
const typedef_decl = typedef_ty.getDecl();
const underlying_type = typedef_decl.getUnderlyingType();
return qualTypeWasDemotedToOpaque(c, underlying_type);
},
.Record => {
const record_ty = @ptrCast(*const clang.RecordType, ty);
const record_decl = record_ty.getDecl();
const canonical = @ptrToInt(record_decl.getCanonicalDecl());
return c.opaque_demotes.contains(canonical);
},
.Enum => {
const enum_ty = @ptrCast(*const clang.EnumType, ty);
const enum_decl = enum_ty.getDecl();
const canonical = @ptrToInt(enum_decl.getCanonicalDecl());
return c.opaque_demotes.contains(canonical);
},
.Elaborated => {
const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty);
return qualTypeWasDemotedToOpaque(c, elaborated_ty.getNamedType());
},
.Decayed => {
const decayed_ty = @ptrCast(*const clang.DecayedType, ty);
return qualTypeWasDemotedToOpaque(c, decayed_ty.getDecayedType());
},
.Attributed => {
const attributed_ty = @ptrCast(*const clang.AttributedType, ty);
return qualTypeWasDemotedToOpaque(c, attributed_ty.getEquivalentType());
},
.MacroQualified => {
const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, ty);
return qualTypeWasDemotedToOpaque(c, macroqualified_ty.getModifiedType());
},
else => return false,
}
}
fn isCVoid(qt: clang.QualType) bool {
const ty = qt.getTypePtr();
if (ty.getTypeClass() == .Builtin) {
const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
return builtin_ty.getKind() == .Void;
}
return false;
}
const FnDeclContext = struct {
fn_name: []const u8,
has_body: bool,
storage_class: clang.StorageClass,
is_export: bool,
};
fn transCC(
c: *Context,
fn_ty: *const clang.FunctionType,
source_loc: clang.SourceLocation,
) !CallingConvention {
const clang_cc = fn_ty.getCallConv();
switch (clang_cc) {
.C => return CallingConvention.C,
.X86StdCall => return CallingConvention.Stdcall,
.X86FastCall => return CallingConvention.Fastcall,
.X86VectorCall, .AArch64VectorCall => return CallingConvention.Vectorcall,
.X86ThisCall => return CallingConvention.Thiscall,
.AAPCS => return CallingConvention.AAPCS,
.AAPCS_VFP => return CallingConvention.AAPCSVFP,
.X86_64SysV => return CallingConvention.SysV,
else => return fail(
c,
error.UnsupportedType,
source_loc,
"unsupported calling convention: {s}",
.{@tagName(clang_cc)},
),
}
}
fn transFnProto(
c: *Context,
fn_decl: ?*const clang.FunctionDecl,
fn_proto_ty: *const clang.FunctionProtoType,
source_loc: clang.SourceLocation,
fn_decl_context: ?FnDeclContext,
is_pub: bool,
) !*ast.Payload.Func {
const fn_ty = @ptrCast(*const clang.FunctionType, fn_proto_ty);
const cc = try transCC(c, fn_ty, source_loc);
const is_var_args = fn_proto_ty.isVariadic();
return finishTransFnProto(c, fn_decl, fn_proto_ty, fn_ty, source_loc, fn_decl_context, is_var_args, cc, is_pub);
}
fn transFnNoProto(
c: *Context,
fn_ty: *const clang.FunctionType,
source_loc: clang.SourceLocation,
fn_decl_context: ?FnDeclContext,
is_pub: bool,
) !*ast.Payload.Func {
const cc = try transCC(c, fn_ty, source_loc);
const is_var_args = if (fn_decl_context) |ctx| (!ctx.is_export and ctx.storage_class != .Static) else true;
return finishTransFnProto(c, null, null, fn_ty, source_loc, fn_decl_context, is_var_args, cc, is_pub);
}
fn finishTransFnProto(
c: *Context,
fn_decl: ?*const clang.FunctionDecl,
fn_proto_ty: ?*const clang.FunctionProtoType,
fn_ty: *const clang.FunctionType,
source_loc: clang.SourceLocation,
fn_decl_context: ?FnDeclContext,
is_var_args: bool,
cc: CallingConvention,
is_pub: bool,
) !*ast.Payload.Func {
const is_export = if (fn_decl_context) |ctx| ctx.is_export else false;
const is_extern = if (fn_decl_context) |ctx| !ctx.has_body else false;
const scope = &c.global_scope.base;
// TODO check for always_inline attribute
// TODO check for align attribute
var fn_params = std.ArrayList(ast.Payload.Param).init(c.gpa);
defer fn_params.deinit();
const param_count: usize = if (fn_proto_ty != null) fn_proto_ty.?.getNumParams() else 0;
try fn_params.ensureCapacity(param_count);
var i: usize = 0;
while (i < param_count) : (i += 1) {
const param_qt = fn_proto_ty.?.getParamType(@intCast(c_uint, i));
const is_noalias = param_qt.isRestrictQualified();
const param_name: ?[]const u8 =
if (fn_decl) |decl|
blk: {
const param = decl.getParamDecl(@intCast(c_uint, i));
const param_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, param).getName_bytes_begin());
if (param_name.len < 1)
break :blk null;
break :blk param_name;
} else null;
const type_node = try transQualType(c, scope, param_qt, source_loc);
fn_params.addOneAssumeCapacity().* = .{
.is_noalias = is_noalias,
.name = param_name,
.type = type_node,
};
}
const linksection_string = blk: {
if (fn_decl) |decl| {
var str_len: usize = undefined;
if (decl.getSectionAttribute(&str_len)) |str_ptr| {
break :blk str_ptr[0..str_len];
}
}
break :blk null;
};
const alignment = if (fn_decl) |decl| zigAlignment(decl.getAlignedAttribute(c.clang_context)) else null;
const explicit_callconv = if ((is_export or is_extern) and cc == .C) null else cc;
const return_type_node = blk: {
if (fn_ty.getNoReturnAttr()) {
break :blk Tag.noreturn_type.init();
} else {
const return_qt = fn_ty.getReturnType();
if (isCVoid(return_qt)) {
// convert primitive c_void to actual void (only for return type)
break :blk Tag.void_type.init();
} else {
break :blk transQualType(c, scope, return_qt, source_loc) catch |err| switch (err) {
error.UnsupportedType => {
try warn(c, scope, source_loc, "unsupported function proto return type", .{});
return err;
},
error.OutOfMemory => |e| return e,
};
}
}
};
const name: ?[]const u8 = if (fn_decl_context) |ctx| ctx.fn_name else null;
const payload = try c.arena.create(ast.Payload.Func);
payload.* = .{
.base = .{ .tag = .func },
.data = .{
.is_pub = is_pub,
.is_extern = is_extern,
.is_export = is_export,
.is_var_args = is_var_args,
.name = name,
.linksection_string = linksection_string,
.explicit_callconv = explicit_callconv,
.params = try c.arena.dupe(ast.Payload.Param, fn_params.items),
.return_type = return_type_node,
.body = null,
.alignment = alignment,
},
};
return payload;
}
fn warn(c: *Context, scope: *Scope, loc: clang.SourceLocation, comptime format: []const u8, args: anytype) !void {
const args_prefix = .{c.locStr(loc)};
const value = try std.fmt.allocPrint(c.arena, "// {s}: warning: " ++ format, args_prefix ++ args);
try scope.appendNode(try Tag.warning.create(c.arena, value));
}
fn fail(
c: *Context,
err: anytype,
source_loc: clang.SourceLocation,
comptime format: []const u8,
args: anytype,
) (@TypeOf(err) || error{OutOfMemory}) {
try warn(c, &c.global_scope.base, source_loc, format, args);
return err;
}
pub fn failDecl(c: *Context, loc: clang.SourceLocation, name: []const u8, comptime format: []const u8, args: anytype) Error!void {
// location
// pub const name = @compileError(msg);
const fail_msg = try std.fmt.allocPrint(c.arena, format, args);
try addTopLevelDecl(c, name, try Tag.fail_decl.create(c.arena, .{ .actual = name, .mangled = fail_msg }));
const location_comment = try std.fmt.allocPrint(c.arena, "// {s}", .{c.locStr(loc)});
try c.global_scope.nodes.append(try Tag.warning.create(c.arena, location_comment));
}
pub fn freeErrors(errors: []ClangErrMsg) void {
errors.ptr.delete(errors.len);
}
const PatternList = struct {
patterns: []Pattern,
/// Templates must be function-like macros.
/// The first element is the macro source; the second element is the name of
/// the function in std.zig.c_translation.Macros which implements it.
const templates = [_][2][]const u8{
[2][]const u8{ "f_SUFFIX(X) (X ## f)", "F_SUFFIX" },
[2][]const u8{ "F_SUFFIX(X) (X ## F)", "F_SUFFIX" },
[2][]const u8{ "u_SUFFIX(X) (X ## u)", "U_SUFFIX" },
[2][]const u8{ "U_SUFFIX(X) (X ## U)", "U_SUFFIX" },
[2][]const u8{ "l_SUFFIX(X) (X ## l)", "L_SUFFIX" },
[2][]const u8{ "L_SUFFIX(X) (X ## L)", "L_SUFFIX" },
[2][]const u8{ "ul_SUFFIX(X) (X ## ul)", "UL_SUFFIX" },
[2][]const u8{ "uL_SUFFIX(X) (X ## uL)", "UL_SUFFIX" },
[2][]const u8{ "Ul_SUFFIX(X) (X ## Ul)", "UL_SUFFIX" },
[2][]const u8{ "UL_SUFFIX(X) (X ## UL)", "UL_SUFFIX" },
[2][]const u8{ "ll_SUFFIX(X) (X ## ll)", "LL_SUFFIX" },
[2][]const u8{ "LL_SUFFIX(X) (X ## LL)", "LL_SUFFIX" },
[2][]const u8{ "ull_SUFFIX(X) (X ## ull)", "ULL_SUFFIX" },
[2][]const u8{ "uLL_SUFFIX(X) (X ## uLL)", "ULL_SUFFIX" },
[2][]const u8{ "Ull_SUFFIX(X) (X ## Ull)", "ULL_SUFFIX" },
[2][]const u8{ "ULL_SUFFIX(X) (X ## ULL)", "ULL_SUFFIX" },
[2][]const u8{ "CAST_OR_CALL(X, Y) (X)(Y)", "CAST_OR_CALL" },
[2][]const u8{
\\wl_container_of(ptr, sample, member) \
\\(__typeof__(sample))((char *)(ptr) - \
\\ offsetof(__typeof__(*sample), member))
,
"WL_CONTAINER_OF",
},
[2][]const u8{ "IGNORE_ME(X) ((void)(X))", "DISCARD" },
[2][]const u8{ "IGNORE_ME(X) (void)(X)", "DISCARD" },
[2][]const u8{ "IGNORE_ME(X) ((const void)(X))", "DISCARD" },
[2][]const u8{ "IGNORE_ME(X) (const void)(X)", "DISCARD" },
[2][]const u8{ "IGNORE_ME(X) ((volatile void)(X))", "DISCARD" },
[2][]const u8{ "IGNORE_ME(X) (volatile void)(X)", "DISCARD" },
[2][]const u8{ "IGNORE_ME(X) ((const volatile void)(X))", "DISCARD" },
[2][]const u8{ "IGNORE_ME(X) (const volatile void)(X)", "DISCARD" },
[2][]const u8{ "IGNORE_ME(X) ((volatile const void)(X))", "DISCARD" },
[2][]const u8{ "IGNORE_ME(X) (volatile const void)(X)", "DISCARD" },
};
/// Assumes that `ms` represents a tokenized function-like macro.
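/// Maps each parameter name to its token index; e.g. (illustrative) for
/// `FOO(a, b)` the resulting map is { "a" => 2, "b" => 4 }.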
fn buildArgsHash(allocator: *mem.Allocator, ms: MacroSlicer, hash: *ArgsPositionMap) MacroProcessingError!void {
assert(ms.tokens.len > 2);
assert(ms.tokens[0].id == .Identifier);
assert(ms.tokens[1].id == .LParen);
var i: usize = 2;
while (true) : (i += 1) {
const token = ms.tokens[i];
switch (token.id) {
.RParen => break,
.Comma => continue,
.Identifier => {
const identifier = ms.slice(token);
try hash.put(allocator, identifier, i);
},
else => return error.UnexpectedMacroToken,
}
}
}
const Pattern = struct {
tokens: []const CToken,
source: []const u8,
impl: []const u8,
args_hash: ArgsPositionMap,
fn init(self: *Pattern, allocator: *mem.Allocator, template: [2][]const u8) Error!void {
const source = template[0];
const impl = template[1];
var tok_list = std.ArrayList(CToken).init(allocator);
defer tok_list.deinit();
try tokenizeMacro(source, &tok_list);
const tokens = try allocator.dupe(CToken, tok_list.items);
self.* = .{
.tokens = tokens,
.source = source,
.impl = impl,
.args_hash = .{},
};
const ms = MacroSlicer{ .source = source, .tokens = tokens };
buildArgsHash(allocator, ms, &self.args_hash) catch |err| switch (err) {
error.UnexpectedMacroToken => unreachable,
else => |e| return e,
};
}
fn deinit(self: *Pattern, allocator: *mem.Allocator) void {
self.args_hash.deinit(allocator);
allocator.free(self.tokens);
}
/// This function assumes that `ms` has already been validated to contain a function-like
/// macro, and that the parsed template macro in `self` also contains a function-like
/// macro. Please review this logic carefully if changing that assumption. Two
/// function-like macros are considered equivalent if and only if they contain the same
/// list of tokens, modulo parameter names.
fn isEquivalent(self: Pattern, ms: MacroSlicer, args_hash: ArgsPositionMap) bool {
if (self.tokens.len != ms.tokens.len) return false;
if (args_hash.count() != self.args_hash.count()) return false;
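// Skip the parameter list; equivalent macros may use different parameter
// names, which are compared via the args hashes instead.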
var i: usize = 2;
while (self.tokens[i].id != .RParen) : (i += 1) {}
const pattern_slicer = MacroSlicer{ .source = self.source, .tokens = self.tokens };
while (i < self.tokens.len) : (i += 1) {
const pattern_token = self.tokens[i];
const macro_token = ms.tokens[i];
if (meta.activeTag(pattern_token.id) != meta.activeTag(macro_token.id)) return false;
const pattern_bytes = pattern_slicer.slice(pattern_token);
const macro_bytes = ms.slice(macro_token);
switch (pattern_token.id) {
.Identifier => {
const pattern_arg_index = self.args_hash.get(pattern_bytes);
const macro_arg_index = args_hash.get(macro_bytes);
if (pattern_arg_index == null and macro_arg_index == null) {
if (!mem.eql(u8, pattern_bytes, macro_bytes)) return false;
} else if (pattern_arg_index != null and macro_arg_index != null) {
if (pattern_arg_index.? != macro_arg_index.?) return false;
} else {
return false;
}
},
.MacroString, .StringLiteral, .CharLiteral, .IntegerLiteral, .FloatLiteral => {
if (!mem.eql(u8, pattern_bytes, macro_bytes)) return false;
},
else => {
// other tags correspond to keywords and operators that do not contain a "payload"
// that can vary
},
}
}
return true;
}
};
fn init(allocator: *mem.Allocator) Error!PatternList {
const patterns = try allocator.alloc(Pattern, templates.len);
for (templates) |template, i| {
try patterns[i].init(allocator, template);
}
return PatternList{ .patterns = patterns };
}
fn deinit(self: *PatternList, allocator: *mem.Allocator) void {
for (self.patterns) |*pattern| pattern.deinit(allocator);
allocator.free(self.patterns);
}
fn match(self: PatternList, allocator: *mem.Allocator, ms: MacroSlicer) Error!?Pattern {
var args_hash: ArgsPositionMap = .{};
defer args_hash.deinit(allocator);
buildArgsHash(allocator, ms, &args_hash) catch |err| switch (err) {
error.UnexpectedMacroToken => return null,
else => |e| return e,
};
for (self.patterns) |pattern| if (pattern.isEquivalent(ms, args_hash)) return pattern;
return null;
}
};
const MacroSlicer = struct {
source: []const u8,
tokens: []const CToken,
fn slice(self: MacroSlicer, token: CToken) []const u8 {
return self.source[token.start..token.end];
}
};
// Testing here instead of test/translate_c.zig allows us to also test that the
// mapped function exists in `std.zig.c_translation.Macros`
test "Macro matching" {
const helper = struct {
const MacroFunctions = @import("std").zig.c_translation.Macros;
fn checkMacro(allocator: *mem.Allocator, pattern_list: PatternList, source: []const u8, comptime expected_match: ?[]const u8) !void {
var tok_list = std.ArrayList(CToken).init(allocator);
defer tok_list.deinit();
try tokenizeMacro(source, &tok_list);
const macro_slicer = MacroSlicer{ .source = source, .tokens = tok_list.items };
const matched = try pattern_list.match(allocator, macro_slicer);
if (expected_match) |expected| {
try testing.expectEqualStrings(expected, matched.?.impl);
try testing.expect(@hasDecl(MacroFunctions, expected));
} else {
try testing.expectEqual(@as(@TypeOf(matched), null), matched);
}
}
};
const allocator = std.testing.allocator;
var pattern_list = try PatternList.init(allocator);
defer pattern_list.deinit(allocator);
try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## F)", "F_SUFFIX");
try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## U)", "U_SUFFIX");
try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## L)", "L_SUFFIX");
try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## LL)", "LL_SUFFIX");
try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## UL)", "UL_SUFFIX");
try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## ULL)", "ULL_SUFFIX");
try helper.checkMacro(allocator, pattern_list,
\\container_of(a, b, c) \
\\(__typeof__(b))((char *)(a) - \
\\ offsetof(__typeof__(*b), c))
, "WL_CONTAINER_OF");
try helper.checkMacro(allocator, pattern_list, "NO_MATCH(X, Y) (X + Y)", null);
try helper.checkMacro(allocator, pattern_list, "CAST_OR_CALL(X, Y) (X)(Y)", "CAST_OR_CALL");
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (void)(X)", "DISCARD");
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((void)(X))", "DISCARD");
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (const void)(X)", "DISCARD");
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((const void)(X))", "DISCARD");
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (volatile void)(X)", "DISCARD");
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((volatile void)(X))", "DISCARD");
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (const volatile void)(X)", "DISCARD");
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((const volatile void)(X))", "DISCARD");
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (volatile const void)(X)", "DISCARD");
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((volatile const void)(X))", "DISCARD");
}
const MacroCtx = struct {
source: []const u8,
list: []const CToken,
i: usize = 0,
loc: clang.SourceLocation,
name: []const u8,
fn peek(self: *MacroCtx) ?CToken.Id {
// Guard against indexing past the final token (the list always ends with Eof/Nl).
if (self.i + 1 >= self.list.len) return null;
return self.list[self.i + 1].id;
}
fn next(self: *MacroCtx) ?CToken.Id {
if (self.i + 1 >= self.list.len) return null;
self.i += 1;
return self.list[self.i].id;
}
fn slice(self: *MacroCtx) []const u8 {
const tok = self.list[self.i];
return self.source[tok.start..tok.end];
}
fn fail(self: *MacroCtx, c: *Context, comptime fmt: []const u8, args: anytype) !void {
return failDecl(c, self.loc, self.name, fmt, args);
}
fn makeSlicer(self: *const MacroCtx) MacroSlicer {
return MacroSlicer{ .source = self.source, .tokens = self.list };
}
};
fn tokenizeMacro(source: []const u8, tok_list: *std.ArrayList(CToken)) Error!void {
var tokenizer = std.c.Tokenizer{
.buffer = source,
};
while (true) {
const tok = tokenizer.next();
switch (tok.id) {
.Nl, .Eof => {
try tok_list.append(tok);
break;
},
.LineComment, .MultiLineComment => continue,
else => {},
}
try tok_list.append(tok);
}
}
fn transPreprocessorEntities(c: *Context, unit: *clang.ASTUnit) Error!void {
// TODO if we see #undef, delete it from the table
var it = unit.getLocalPreprocessingEntities_begin();
const it_end = unit.getLocalPreprocessingEntities_end();
var tok_list = std.ArrayList(CToken).init(c.gpa);
defer tok_list.deinit();
const scope = c.global_scope;
while (it.I != it_end.I) : (it.I += 1) {
const entity = it.deref();
tok_list.items.len = 0;
switch (entity.getKind()) {
.MacroDefinitionKind => {
const macro = @ptrCast(*clang.MacroDefinitionRecord, entity);
const raw_name = macro.getName_getNameStart();
const begin_loc = macro.getSourceRange_getBegin();
const end_loc = clang.Lexer.getLocForEndOfToken(macro.getSourceRange_getEnd(), c.source_manager, unit);
const name = try c.str(raw_name);
if (scope.containsNow(name)) {
continue;
}
const begin_c = c.source_manager.getCharacterData(begin_loc);
const end_c = c.source_manager.getCharacterData(end_loc);
const slice_len = @ptrToInt(end_c) - @ptrToInt(begin_c);
const slice = begin_c[0..slice_len];
try tokenizeMacro(slice, &tok_list);
var macro_ctx = MacroCtx{
.source = slice,
.list = tok_list.items,
.name = name,
.loc = begin_loc,
};
assert(mem.eql(u8, macro_ctx.slice(), name));
var macro_fn = false;
switch (macro_ctx.peek().?) {
.Identifier => {
// If the macro expands to itself, ignore it. For example, from stdio.h:
// #define stdin stdin
const tok = macro_ctx.list[1];
if (mem.eql(u8, name, slice[tok.start..tok.end])) {
continue;
}
},
.Nl, .Eof => {
// This is a macro without a value; there is nothing to translate.
continue;
},
.LParen => {
// If the name is immediately followed by '(' (no whitespace between
// the tokens), it is a function-like macro.
macro_fn = macro_ctx.list[0].end == macro_ctx.list[1].start;
},
else => {},
}
(if (macro_fn)
transMacroFnDefine(c, ¯o_ctx)
else
transMacroDefine(c, ¯o_ctx)) catch |err| switch (err) {
error.ParseError => continue,
error.OutOfMemory => |e| return e,
};
},
else => {},
}
}
}
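// Translates an object-like macro into a `pub const`, e.g. (illustrative):
// c: #define FOO 1
// zig: pub const FOO = @as(c_int, 1);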
fn transMacroDefine(c: *Context, m: *MacroCtx) ParseError!void {
const scope = &c.global_scope.base;
const init_node = try parseCExpr(c, m, scope);
const last = m.next().?;
if (last != .Eof and last != .Nl)
return m.fail(c, "unable to translate C expr: unexpected token .{s}", .{@tagName(last)});
const var_decl = try Tag.pub_var_simple.create(c.arena, .{ .name = m.name, .init = init_node });
try c.global_scope.macro_table.put(m.name, var_decl);
}
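// Translates a function-like macro either into a reference to a helper from
// std.zig.c_translation.Macros (when the body matches a known pattern) or into
// a `pub inline fn` with `anytype` parameters and an inferred return type.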
fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void {
const macro_slicer = m.makeSlicer();
if (try c.pattern_list.match(c.gpa, macro_slicer)) |pattern| {
const decl = try Tag.pub_var_simple.create(c.arena, .{
.name = m.name,
.init = try Tag.helpers_macro.create(c.arena, pattern.impl),
});
try c.global_scope.macro_table.put(m.name, decl);
return;
}
var block_scope = try Scope.Block.init(c, &c.global_scope.base, false);
defer block_scope.deinit();
const scope = &block_scope.base;
if (m.next().? != .LParen) {
return m.fail(c, "unable to translate C expr: expected '('", .{});
}
var fn_params = std.ArrayList(ast.Payload.Param).init(c.gpa);
defer fn_params.deinit();
while (true) {
if (m.peek().? != .Identifier) break;
_ = m.next();
const mangled_name = try block_scope.makeMangledName(c, m.slice());
try fn_params.append(.{
.is_noalias = false,
.name = mangled_name,
.type = Tag.@"anytype".init(),
});
try block_scope.discardVariable(c, mangled_name);
if (m.peek().? != .Comma) break;
_ = m.next();
}
if (m.next().? != .RParen) {
return m.fail(c, "unable to translate C expr: expected ')'", .{});
}
const expr = try parseCExpr(c, m, scope);
const last = m.next().?;
if (last != .Eof and last != .Nl)
return m.fail(c, "unable to translate C expr: unexpected token .{s}", .{@tagName(last)});
const typeof_arg = if (expr.castTag(.block)) |some| blk: {
const stmts = some.data.stmts;
const blk_last = stmts[stmts.len - 1];
const br = blk_last.castTag(.break_val).?;
break :blk br.data.val;
} else expr;
const return_type = if (typeof_arg.castTag(.helpers_cast) orelse typeof_arg.castTag(.std_mem_zeroinit)) |some|
some.data.lhs
else if (typeof_arg.castTag(.std_mem_zeroes)) |some|
some.data
else
try Tag.typeof.create(c.arena, typeof_arg);
const return_expr = try Tag.@"return".create(c.arena, expr);
try block_scope.statements.append(return_expr);
const fn_decl = try Tag.pub_inline_fn.create(c.arena, .{
.name = m.name,
.params = try c.arena.dupe(ast.Payload.Param, fn_params.items),
.return_type = return_type,
.body = try block_scope.complete(c),
});
try c.global_scope.macro_table.put(m.name, fn_decl);
}
const ParseError = Error || error{ParseError};
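/// Parse a full C expression, including the comma operator. For
/// `a, b, c` every operand except the last is evaluated and discarded
/// inside a block, and the block yields the value of the last operand.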
fn parseCExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
// TODO parseCAssignExpr here
const node = try parseCCondExpr(c, m, scope);
if (m.next().? != .Comma) {
m.i -= 1;
return node;
}
var block_scope = try Scope.Block.init(c, scope, true);
defer block_scope.deinit();
var last = node;
while (true) {
// suppress result
const ignore = try Tag.discard.create(c.arena, .{ .should_skip = false, .value = last });
try block_scope.statements.append(ignore);
last = try parseCCondExpr(c, m, scope);
if (m.next().? != .Comma) {
m.i -= 1;
break;
}
}
const break_node = try Tag.break_val.create(c.arena, .{
.label = block_scope.label,
.val = last,
});
try block_scope.statements.append(break_node);
return try block_scope.complete(c);
}
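/// Translate a C number literal. Integer literals get their prefix rewritten
/// into Zig syntax (`0o` for octal, `0x` for `0X`) and are cast to the C type
/// implied by their suffix; `promoteIntLiteral` is used when the value is not
/// guaranteed to fit that type's minimum guaranteed width. Float literals are
/// patched to satisfy Zig syntax (e.g. `1.` becomes `1.0`).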
fn parseCNumLit(c: *Context, m: *MacroCtx) ParseError!Node {
var lit_bytes = m.slice();
switch (m.list[m.i].id) {
.IntegerLiteral => |suffix| {
var radix: []const u8 = "decimal";
if (lit_bytes.len > 2 and lit_bytes[0] == '0') {
switch (lit_bytes[1]) {
'0'...'7' => {
// Octal
lit_bytes = try std.fmt.allocPrint(c.arena, "0o{s}", .{lit_bytes[1..]});
radix = "octal";
},
'X' => {
// Hexadecimal with capital X, valid in C but not in Zig
lit_bytes = try std.fmt.allocPrint(c.arena, "0x{s}", .{lit_bytes[2..]});
radix = "hexadecimal";
},
'x' => {
radix = "hexadecimal";
},
else => {},
}
}
const type_node = try Tag.type.create(c.arena, switch (suffix) {
.none => "c_int",
.u => "c_uint",
.l => "c_long",
.lu => "c_ulong",
.ll => "c_longlong",
.llu => "c_ulonglong",
.f => unreachable,
});
lit_bytes = lit_bytes[0 .. lit_bytes.len - switch (suffix) {
.none => @as(u8, 0),
.u, .l => 1,
.lu, .ll => 2,
.llu => 3,
.f => unreachable,
}];
const value = std.fmt.parseInt(i128, lit_bytes, 0) catch math.maxInt(i128);
// make the output less noisy by skipping promoteIntLiteral where
// it's guaranteed to not be required because of C standard type constraints
const guaranteed_to_fit = switch (suffix) {
.none => !meta.isError(math.cast(i16, value)),
.u => !meta.isError(math.cast(u16, value)),
.l => !meta.isError(math.cast(i32, value)),
.lu => !meta.isError(math.cast(u32, value)),
.ll => !meta.isError(math.cast(i64, value)),
.llu => !meta.isError(math.cast(u64, value)),
.f => unreachable,
};
const literal_node = try transCreateNodeNumber(c, lit_bytes, .int);
if (guaranteed_to_fit) {
return Tag.as.create(c.arena, .{ .lhs = type_node, .rhs = literal_node });
} else {
return Tag.helpers_promoteIntLiteral.create(c.arena, .{
.type = type_node,
.value = literal_node,
.radix = try Tag.enum_literal.create(c.arena, radix),
});
}
},
.FloatLiteral => |suffix| {
if (suffix != .none) lit_bytes = lit_bytes[0 .. lit_bytes.len - 1];
const dot_index = mem.indexOfScalar(u8, lit_bytes, '.').?;
if (dot_index == 0) {
lit_bytes = try std.fmt.allocPrint(c.arena, "0{s}", .{lit_bytes});
} else if (dot_index + 1 == lit_bytes.len or !std.ascii.isDigit(lit_bytes[dot_index + 1])) {
// If the literal lacks a digit after the `.`, we need to
// add one since `1.` or `1.e10` would be invalid syntax in Zig.
lit_bytes = try std.fmt.allocPrint(c.arena, "{s}0{s}", .{
lit_bytes[0 .. dot_index + 1],
lit_bytes[dot_index + 1 ..],
});
}
if (suffix == .none)
return transCreateNodeNumber(c, lit_bytes, .float);
const type_node = try Tag.type.create(c.arena, switch (suffix) {
.f => "f32",
.l => "c_longdouble",
else => unreachable,
});
const rhs = try transCreateNodeNumber(c, lit_bytes, .float);
return Tag.as.create(c.arena, .{ .lhs = type_node, .rhs = rhs });
},
else => unreachable,
}
}
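/// Rewrite C escape sequences into Zig ones inside a char/string literal,
/// e.g. `\a` becomes `\x07` and octal escapes like `\101` become hex
/// (`\x41`). Returns the source unchanged when it contains no backslash.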
fn zigifyEscapeSequences(ctx: *Context, m: *MacroCtx) ![]const u8 {
var source = m.slice();
for (source) |c, i| {
if (c == '\"' or c == '\'') {
source = source[i..];
break;
}
}
for (source) |c| {
if (c == '\\') {
break;
}
} else return source;
var bytes = try ctx.arena.alloc(u8, source.len * 2);
var state: enum {
Start,
Escape,
Hex,
Octal,
} = .Start;
var i: usize = 0;
var count: u8 = 0;
var num: u8 = 0;
for (source) |c| {
switch (state) {
.Escape => {
switch (c) {
'n', 'r', 't', '\\', '\'', '\"' => {
bytes[i] = c;
},
'0'...'7' => {
count += 1;
num += c - '0';
state = .Octal;
bytes[i] = 'x';
},
'x' => {
state = .Hex;
bytes[i] = 'x';
},
'a' => {
bytes[i] = 'x';
i += 1;
bytes[i] = '0';
i += 1;
bytes[i] = '7';
},
'b' => {
bytes[i] = 'x';
i += 1;
bytes[i] = '0';
i += 1;
bytes[i] = '8';
},
'f' => {
bytes[i] = 'x';
i += 1;
bytes[i] = '0';
i += 1;
bytes[i] = 'C';
},
'v' => {
bytes[i] = 'x';
i += 1;
bytes[i] = '0';
i += 1;
bytes[i] = 'B';
},
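// C allows `\?` (a leftover from trigraphs); Zig does not, so back up
// over the backslash and emit a plain '?'.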
'?' => {
i -= 1;
bytes[i] = '?';
},
'u', 'U' => {
try m.fail(ctx, "macro tokenizing failed: TODO unicode escape sequences", .{});
return error.ParseError;
},
else => {
try m.fail(ctx, "macro tokenizing failed: unknown escape sequence", .{});
return error.ParseError;
},
}
i += 1;
if (state == .Escape)
state = .Start;
},
.Start => {
if (c == '\\') {
state = .Escape;
}
bytes[i] = c;
i += 1;
},
.Hex => {
switch (c) {
'0'...'9' => {
num = std.math.mul(u8, num, 16) catch {
try m.fail(ctx, "macro tokenizing failed: hex literal overflowed", .{});
return error.ParseError;
};
num += c - '0';
},
'a'...'f' => {
num = std.math.mul(u8, num, 16) catch {
try m.fail(ctx, "macro tokenizing failed: hex literal overflowed", .{});
return error.ParseError;
};
num += c - 'a' + 10;
},
'A'...'F' => {
num = std.math.mul(u8, num, 16) catch {
try m.fail(ctx, "macro tokenizing failed: hex literal overflowed", .{});
return error.ParseError;
};
num += c - 'A' + 10;
},
else => {
i += std.fmt.formatIntBuf(bytes[i..], num, 16, .lower, std.fmt.FormatOptions{ .fill = '0', .width = 2 });
num = 0;
if (c == '\\')
state = .Escape
else
state = .Start;
bytes[i] = c;
i += 1;
},
}
},
.Octal => {
const accept_digit = switch (c) {
// The maximum length of an octal escape sequence is 3 digits
'0'...'7' => count < 3,
else => false,
};
if (accept_digit) {
count += 1;
num = std.math.mul(u8, num, 8) catch {
try m.fail(ctx, "macro tokenizing failed: octal literal overflowed", .{});
return error.ParseError;
};
num += c - '0';
} else {
i += std.fmt.formatIntBuf(bytes[i..], num, 16, .lower, std.fmt.FormatOptions{ .fill = '0', .width = 2 });
num = 0;
count = 0;
if (c == '\\')
state = .Escape
else
state = .Start;
bytes[i] = c;
i += 1;
}
},
}
}
if (state == .Hex or state == .Octal)
i += std.fmt.formatIntBuf(bytes[i..], num, 16, .lower, std.fmt.FormatOptions{ .fill = '0', .width = 2 });
return bytes[0..i];
}
fn parseCPrimaryExprInner(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
const tok = m.next().?;
const slice = m.slice();
switch (tok) {
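// Char literals that are neither plain single-character constants nor
// escape sequences (e.g. multi-character constants like 'abcd') have no
// Zig equivalent; emit them as a hex integer of their bytes instead.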
.CharLiteral => {
if (slice[0] != '\'' or slice[1] == '\\' or slice.len == 3) {
return Tag.char_literal.create(c.arena, try zigifyEscapeSequences(c, m));
} else {
const str = try std.fmt.allocPrint(c.arena, "0x{s}", .{std.fmt.fmtSliceHexLower(slice[1 .. slice.len - 1])});
return Tag.integer_literal.create(c.arena, str);
}
},
.StringLiteral => {
return Tag.string_literal.create(c.arena, try zigifyEscapeSequences(c, m));
},
.IntegerLiteral, .FloatLiteral => {
return parseCNumLit(c, m);
},
.Identifier => {
const mangled_name = scope.getAlias(slice);
if (mem.startsWith(u8, mangled_name, "__builtin_") and !isBuiltinDefined(mangled_name)) {
try m.fail(c, "TODO implement function '{s}' in std.zig.c_builtins", .{mangled_name});
return error.ParseError;
}
if (builtin_typedef_map.get(mangled_name)) |ty| return Tag.type.create(c.arena, ty);
const identifier = try Tag.identifier.create(c.arena, mangled_name);
scope.skipVariableDiscard(identifier.castTag(.identifier).?.data);
return identifier;
},
.LParen => {
const inner_node = try parseCExpr(c, m, scope);
const next_id = m.next().?;
if (next_id != .RParen) {
try m.fail(c, "unable to translate C expr: expected ')' instead got: {s}", .{@tagName(next_id)});
return error.ParseError;
}
return inner_node;
},
else => {
// for handling type macros (EVIL)
// TODO maybe detect and treat type macros as typedefs in parseCSpecifierQualifierList?
m.i -= 1;
if (try parseCTypeName(c, m, scope, true)) |type_name| {
return type_name;
}
try m.fail(c, "unable to translate C expr: unexpected token .{s}", .{@tagName(tok)});
return error.ParseError;
},
}
}
fn parseCPrimaryExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
var node = try parseCPrimaryExprInner(c, m, scope);
// In C the preprocessor handles concatenation of adjacent string literals
// while expanding macros. Approximate that here by concatenating any strings
// and identifiers that follow a primary expression.
while (true) {
switch (m.peek().?) {
.StringLiteral, .Identifier => {},
else => break,
}
node = try Tag.array_cat.create(c.arena, .{ .lhs = node, .rhs = try parseCPrimaryExprInner(c, m, scope) });
}
return node;
}
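// C macro expressions freely mix boolean and integer results; Zig does not.
// These helpers convert between the two only when needed: bool -> int via
// @boolToInt, int -> bool via `!= 0`.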
fn macroBoolToInt(c: *Context, node: Node) !Node {
if (!isBoolRes(node)) {
return node;
}
return Tag.bool_to_int.create(c.arena, node);
}
fn macroIntToBool(c: *Context, node: Node) !Node {
if (isBoolRes(node)) {
return node;
}
return Tag.not_equal.create(c.arena, .{ .lhs = node, .rhs = Tag.zero_literal.init() });
}
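// The parseC*Expr functions below mirror C's operator-precedence levels,
// from the conditional operator down to casts; each level parses its own
// operators and delegates tighter-binding expressions to the next level.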
fn parseCCondExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
const node = try parseCOrExpr(c, m, scope);
if (m.peek().? != .QuestionMark) {
return node;
}
_ = m.next();
const then_body = try parseCOrExpr(c, m, scope);
if (m.next().? != .Colon) {
try m.fail(c, "unable to translate C expr: expected ':'", .{});
return error.ParseError;
}
const else_body = try parseCCondExpr(c, m, scope);
return Tag.@"if".create(c.arena, .{ .cond = node, .then = then_body, .@"else" = else_body });
}
fn parseCOrExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
var node = try parseCAndExpr(c, m, scope);
while (m.next().? == .PipePipe) {
const lhs = try macroIntToBool(c, node);
const rhs = try macroIntToBool(c, try parseCAndExpr(c, m, scope));
node = try Tag.@"or".create(c.arena, .{ .lhs = lhs, .rhs = rhs });
}
m.i -= 1;
return node;
}
fn parseCAndExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
var node = try parseCBitOrExpr(c, m, scope);
while (m.next().? == .AmpersandAmpersand) {
const lhs = try macroIntToBool(c, node);
const rhs = try macroIntToBool(c, try parseCBitOrExpr(c, m, scope));
node = try Tag.@"and".create(c.arena, .{ .lhs = lhs, .rhs = rhs });
}
m.i -= 1;
return node;
}
fn parseCBitOrExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
var node = try parseCBitXorExpr(c, m, scope);
while (m.next().? == .Pipe) {
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCBitXorExpr(c, m, scope));
node = try Tag.bit_or.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
}
m.i -= 1;
return node;
}
fn parseCBitXorExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
var node = try parseCBitAndExpr(c, m, scope);
while (m.next().? == .Caret) {
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCBitAndExpr(c, m, scope));
node = try Tag.bit_xor.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
}
m.i -= 1;
return node;
}
fn parseCBitAndExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
var node = try parseCEqExpr(c, m, scope);
while (m.next().? == .Ampersand) {
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCEqExpr(c, m, scope));
node = try Tag.bit_and.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
}
m.i -= 1;
return node;
}
fn parseCEqExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
var node = try parseCRelExpr(c, m, scope);
while (true) {
switch (m.peek().?) {
.BangEqual => {
_ = m.next();
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCRelExpr(c, m, scope));
node = try Tag.not_equal.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
},
.EqualEqual => {
_ = m.next();
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCRelExpr(c, m, scope));
node = try Tag.equal.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
},
else => return node,
}
}
}
fn parseCRelExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
var node = try parseCShiftExpr(c, m, scope);
while (true) {
switch (m.peek().?) {
.AngleBracketRight => {
_ = m.next();
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCShiftExpr(c, m, scope));
node = try Tag.greater_than.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
},
.AngleBracketRightEqual => {
_ = m.next();
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCShiftExpr(c, m, scope));
node = try Tag.greater_than_equal.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
},
.AngleBracketLeft => {
_ = m.next();
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCShiftExpr(c, m, scope));
node = try Tag.less_than.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
},
.AngleBracketLeftEqual => {
_ = m.next();
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCShiftExpr(c, m, scope));
node = try Tag.less_than_equal.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
},
else => return node,
}
}
}
fn parseCShiftExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
var node = try parseCAddSubExpr(c, m, scope);
while (true) {
switch (m.peek().?) {
.AngleBracketAngleBracketLeft => {
_ = m.next();
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCAddSubExpr(c, m, scope));
node = try Tag.shl.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
},
.AngleBracketAngleBracketRight => {
_ = m.next();
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCAddSubExpr(c, m, scope));
node = try Tag.shr.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
},
else => return node,
}
}
}
fn parseCAddSubExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
var node = try parseCMulExpr(c, m, scope);
while (true) {
switch (m.peek().?) {
.Plus => {
_ = m.next();
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCMulExpr(c, m, scope));
node = try Tag.add.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
},
.Minus => {
_ = m.next();
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCMulExpr(c, m, scope));
node = try Tag.sub.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
},
else => return node,
}
}
}
fn parseCMulExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
var node = try parseCCastExpr(c, m, scope);
while (true) {
switch (m.next().?) {
.Asterisk => {
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCCastExpr(c, m, scope));
node = try Tag.mul.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
},
.Slash => {
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCCastExpr(c, m, scope));
node = try Tag.div.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
},
.Percent => {
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCCastExpr(c, m, scope));
node = try Tag.mod.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
},
else => {
m.i -= 1;
return node;
},
}
}
}
fn parseCCastExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
switch (m.next().?) {
.LParen => {
if (try parseCTypeName(c, m, scope, true)) |type_name| {
if (m.next().? != .RParen) {
try m.fail(c, "unable to translate C expr: expected ')'", .{});
return error.ParseError;
}
if (m.peek().? == .LBrace) {
// initializer list
return parseCPostfixExpr(c, m, scope, type_name);
}
const node_to_cast = try parseCCastExpr(c, m, scope);
return Tag.helpers_cast.create(c.arena, .{ .lhs = type_name, .rhs = node_to_cast });
}
},
else => {},
}
m.i -= 1;
return parseCUnaryExpr(c, m, scope);
}
// allow_fail is set when unsure if we are parsing a type-name
fn parseCTypeName(c: *Context, m: *MacroCtx, scope: *Scope, allow_fail: bool) ParseError!?Node {
if (try parseCSpecifierQualifierList(c, m, scope, allow_fail)) |node| {
return try parseCAbstractDeclarator(c, m, scope, node);
} else {
return null;
}
}
fn parseCSpecifierQualifierList(c: *Context, m: *MacroCtx, scope: *Scope, allow_fail: bool) ParseError!?Node {
const tok = m.next().?;
switch (tok) {
.Identifier => {
const mangled_name = scope.getAlias(m.slice());
if (!allow_fail or c.typedefs.contains(mangled_name)) {
if (builtin_typedef_map.get(mangled_name)) |ty| return try Tag.type.create(c.arena, ty);
return try Tag.identifier.create(c.arena, mangled_name);
}
},
.Keyword_void => return try Tag.type.create(c.arena, "c_void"),
.Keyword_bool => return try Tag.type.create(c.arena, "bool"),
.Keyword_char,
.Keyword_int,
.Keyword_short,
.Keyword_long,
.Keyword_float,
.Keyword_double,
.Keyword_signed,
.Keyword_unsigned,
.Keyword_complex,
=> {
m.i -= 1;
return try parseCNumericType(c, m, scope);
},
.Keyword_enum, .Keyword_struct, .Keyword_union => {
// struct Foo will be declared as struct_Foo by transRecordDecl
const slice = m.slice();
const next_id = m.next().?;
if (next_id != .Identifier) {
try m.fail(c, "unable to translate C expr: expected Identifier instead got: {s}", .{@tagName(next_id)});
return error.ParseError;
}
const name = try std.fmt.allocPrint(c.arena, "{s}_{s}", .{ slice, m.slice() });
return try Tag.identifier.create(c.arena, name);
},
else => {},
}
if (allow_fail) {
m.i -= 1;
return null;
} else {
try m.fail(c, "unable to translate C expr: unexpected token .{s}", .{@tagName(tok)});
return error.ParseError;
}
}
fn parseCNumericType(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
_ = scope;
const KwCounter = struct {
double: u8 = 0,
long: u8 = 0,
int: u8 = 0,
float: u8 = 0,
short: u8 = 0,
char: u8 = 0,
unsigned: u8 = 0,
signed: u8 = 0,
complex: u8 = 0,
fn eql(self: @This(), other: @This()) bool {
return meta.eql(self, other);
}
};
// Yes, these can be in *any* order
// This still doesn't cover cases where, for example, `volatile` is intermixed.
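// e.g. `unsigned long int`, `long unsigned int` and `long int unsigned`
// all count the same keywords and therefore all map to c_ulong.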
var kw = KwCounter{};
// prevent overflow
var i: u8 = 0;
while (i < math.maxInt(u8)) : (i += 1) {
switch (m.next().?) {
.Keyword_double => kw.double += 1,
.Keyword_long => kw.long += 1,
.Keyword_int => kw.int += 1,
.Keyword_float => kw.float += 1,
.Keyword_short => kw.short += 1,
.Keyword_char => kw.char += 1,
.Keyword_unsigned => kw.unsigned += 1,
.Keyword_signed => kw.signed += 1,
.Keyword_complex => kw.complex += 1,
else => {
m.i -= 1;
break;
},
}
}
if (kw.eql(.{ .int = 1 }) or kw.eql(.{ .signed = 1 }) or kw.eql(.{ .signed = 1, .int = 1 }))
return Tag.type.create(c.arena, "c_int");
if (kw.eql(.{ .unsigned = 1 }) or kw.eql(.{ .unsigned = 1, .int = 1 }))
return Tag.type.create(c.arena, "c_uint");
if (kw.eql(.{ .long = 1 }) or kw.eql(.{ .signed = 1, .long = 1 }) or kw.eql(.{ .long = 1, .int = 1 }) or kw.eql(.{ .signed = 1, .long = 1, .int = 1 }))
return Tag.type.create(c.arena, "c_long");
if (kw.eql(.{ .unsigned = 1, .long = 1 }) or kw.eql(.{ .unsigned = 1, .long = 1, .int = 1 }))
return Tag.type.create(c.arena, "c_ulong");
if (kw.eql(.{ .long = 2 }) or kw.eql(.{ .signed = 1, .long = 2 }) or kw.eql(.{ .long = 2, .int = 1 }) or kw.eql(.{ .signed = 1, .long = 2, .int = 1 }))
return Tag.type.create(c.arena, "c_longlong");
if (kw.eql(.{ .unsigned = 1, .long = 2 }) or kw.eql(.{ .unsigned = 1, .long = 2, .int = 1 }))
return Tag.type.create(c.arena, "c_ulonglong");
if (kw.eql(.{ .signed = 1, .char = 1 }))
return Tag.type.create(c.arena, "i8");
if (kw.eql(.{ .char = 1 }) or kw.eql(.{ .unsigned = 1, .char = 1 }))
return Tag.type.create(c.arena, "u8");
if (kw.eql(.{ .short = 1 }) or kw.eql(.{ .signed = 1, .short = 1 }) or kw.eql(.{ .short = 1, .int = 1 }) or kw.eql(.{ .signed = 1, .short = 1, .int = 1 }))
return Tag.type.create(c.arena, "c_short");
if (kw.eql(.{ .unsigned = 1, .short = 1 }) or kw.eql(.{ .unsigned = 1, .short = 1, .int = 1 }))
return Tag.type.create(c.arena, "c_ushort");
if (kw.eql(.{ .float = 1 }))
return Tag.type.create(c.arena, "f32");
if (kw.eql(.{ .double = 1 }))
return Tag.type.create(c.arena, "f64");
if (kw.eql(.{ .long = 1, .double = 1 })) {
try m.fail(c, "unable to translate: TODO long double", .{});
return error.ParseError;
}
if (kw.eql(.{ .float = 1, .complex = 1 })) {
try m.fail(c, "unable to translate: TODO _Complex", .{});
return error.ParseError;
}
if (kw.eql(.{ .double = 1, .complex = 1 })) {
try m.fail(c, "unable to translate: TODO _Complex", .{});
return error.ParseError;
}
if (kw.eql(.{ .long = 1, .double = 1, .complex = 1 })) {
try m.fail(c, "unable to translate: TODO _Complex", .{});
return error.ParseError;
}
try m.fail(c, "unable to translate: invalid numeric type", .{});
return error.ParseError;
}
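/// Handle the only abstract declarator supported in macros: an optional
/// trailing '*'. `void *` becomes an optional single-item pointer; any
/// other `T *` becomes a C pointer (`[*c]T`).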
fn parseCAbstractDeclarator(c: *Context, m: *MacroCtx, scope: *Scope, node: Node) ParseError!Node {
_ = scope;
switch (m.next().?) {
.Asterisk => {
// last token of `node`
const prev_id = m.list[m.i - 1].id;
if (prev_id == .Keyword_void) {
const ptr = try Tag.single_pointer.create(c.arena, .{
.is_const = false,
.is_volatile = false,
.elem_type = node,
});
return Tag.optional_type.create(c.arena, ptr);
} else {
return Tag.c_pointer.create(c.arena, .{
.is_const = false,
.is_volatile = false,
.elem_type = node,
});
}
},
else => {
m.i -= 1;
return node;
},
}
}
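/// Parse postfix operators: member access via `.` and `->`, array
/// indexing, function calls, and `{...}` initializer lists (with or
/// without designated fields, lowered through std.mem.zeroInit).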
fn parseCPostfixExpr(c: *Context, m: *MacroCtx, scope: *Scope, type_name: ?Node) ParseError!Node {
var node = type_name orelse try parseCPrimaryExpr(c, m, scope);
while (true) {
switch (m.next().?) {
.Period => {
if (m.next().? != .Identifier) {
try m.fail(c, "unable to translate C expr: expected identifier", .{});
return error.ParseError;
}
node = try Tag.field_access.create(c.arena, .{ .lhs = node, .field_name = m.slice() });
},
.Arrow => {
if (m.next().? != .Identifier) {
try m.fail(c, "unable to translate C expr: expected identifier", .{});
return error.ParseError;
}
const deref = try Tag.deref.create(c.arena, node);
node = try Tag.field_access.create(c.arena, .{ .lhs = deref, .field_name = m.slice() });
},
.LBracket => {
const index = try macroBoolToInt(c, try parseCExpr(c, m, scope));
node = try Tag.array_access.create(c.arena, .{ .lhs = node, .rhs = index });
if (m.next().? != .RBracket) {
try m.fail(c, "unable to translate C expr: expected ']'", .{});
return error.ParseError;
}
},
.LParen => {
if (m.peek().? == .RParen) {
m.i += 1;
node = try Tag.call.create(c.arena, .{ .lhs = node, .args = &[0]Node{} });
} else {
var args = std.ArrayList(Node).init(c.gpa);
defer args.deinit();
while (true) {
const arg = try parseCCondExpr(c, m, scope);
try args.append(arg);
switch (m.next().?) {
.Comma => {},
.RParen => break,
else => {
try m.fail(c, "unable to translate C expr: expected ',' or ')'", .{});
return error.ParseError;
},
}
}
node = try Tag.call.create(c.arena, .{ .lhs = node, .args = try c.arena.dupe(Node, args.items) });
}
},
.LBrace => {
// Check for designated field initializers
if (m.peek().? == .Period) {
var init_vals = std.ArrayList(ast.Payload.ContainerInitDot.Initializer).init(c.gpa);
defer init_vals.deinit();
while (true) {
if (m.next().? != .Period) {
try m.fail(c, "unable to translate C expr: expected '.'", .{});
return error.ParseError;
}
if (m.next().? != .Identifier) {
try m.fail(c, "unable to translate C expr: expected identifier", .{});
return error.ParseError;
}
const name = m.slice();
if (m.next().? != .Equal) {
try m.fail(c, "unable to translate C expr: expected '='", .{});
return error.ParseError;
}
const val = try parseCCondExpr(c, m, scope);
try init_vals.append(.{ .name = name, .value = val });
switch (m.next().?) {
.Comma => {},
.RBrace => break,
else => {
try m.fail(c, "unable to translate C expr: expected ',' or '}}'", .{});
return error.ParseError;
},
}
}
const tuple_node = try Tag.container_init_dot.create(c.arena, try c.arena.dupe(ast.Payload.ContainerInitDot.Initializer, init_vals.items));
node = try Tag.std_mem_zeroinit.create(c.arena, .{ .lhs = node, .rhs = tuple_node });
continue;
}
var init_vals = std.ArrayList(Node).init(c.gpa);
defer init_vals.deinit();
while (true) {
const val = try parseCCondExpr(c, m, scope);
try init_vals.append(val);
switch (m.next().?) {
.Comma => {},
.RBrace => break,
else => {
try m.fail(c, "unable to translate C expr: expected ',' or '}}'", .{});
return error.ParseError;
},
}
}
const tuple_node = try Tag.tuple.create(c.arena, try c.arena.dupe(Node, init_vals.items));
node = try Tag.std_mem_zeroinit.create(c.arena, .{ .lhs = node, .rhs = tuple_node });
},
.PlusPlus, .MinusMinus => {
try m.fail(c, "TODO postfix inc/dec expr", .{});
return error.ParseError;
},
else => {
m.i -= 1;
return node;
},
}
}
}
fn parseCUnaryExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
switch (m.next().?) {
.Bang => {
const operand = try macroIntToBool(c, try parseCCastExpr(c, m, scope));
return Tag.not.create(c.arena, operand);
},
.Minus => {
const operand = try macroBoolToInt(c, try parseCCastExpr(c, m, scope));
return Tag.negate.create(c.arena, operand);
},
.Plus => return try parseCCastExpr(c, m, scope),
.Tilde => {
const operand = try macroBoolToInt(c, try parseCCastExpr(c, m, scope));
return Tag.bit_not.create(c.arena, operand);
},
.Asterisk => {
const operand = try parseCCastExpr(c, m, scope);
return Tag.deref.create(c.arena, operand);
},
.Ampersand => {
const operand = try parseCCastExpr(c, m, scope);
return Tag.address_of.create(c.arena, operand);
},
.Keyword_sizeof => {
const operand = if (m.peek().? == .LParen) blk: {
_ = m.next();
const inner = (try parseCTypeName(c, m, scope, false)).?;
if (m.next().? != .RParen) {
try m.fail(c, "unable to translate C expr: expected ')'", .{});
return error.ParseError;
}
break :blk inner;
} else try parseCUnaryExpr(c, m, scope);
return Tag.helpers_sizeof.create(c.arena, operand);
},
.Keyword_alignof => {
// TODO this won't work if using <stdalign.h>'s
// #define alignof _Alignof
if (m.next().? != .LParen) {
try m.fail(c, "unable to translate C expr: expected '('", .{});
return error.ParseError;
}
const operand = (try parseCTypeName(c, m, scope, false)).?;
if (m.next().? != .RParen) {
try m.fail(c, "unable to translate C expr: expected ')'", .{});
return error.ParseError;
}
return Tag.alignof.create(c.arena, operand);
},
.PlusPlus, .MinusMinus => {
try m.fail(c, "TODO unary inc/dec expr", .{});
return error.ParseError;
},
else => {
m.i -= 1;
return try parseCPostfixExpr(c, m, scope, null);
},
}
}
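/// Resolve `node` to the container-like node it denotes, following
/// identifiers through the global symbol table and field accesses
/// through their container's field types. Returns null if unknown.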
fn getContainer(c: *Context, node: Node) ?Node {
switch (node.tag()) {
.@"union",
.@"struct",
.address_of,
.bit_not,
.not,
.optional_type,
.negate,
.negate_wrap,
.array_type,
.c_pointer,
.single_pointer,
=> return node,
.identifier => {
const ident = node.castTag(.identifier).?;
if (c.global_scope.sym_table.get(ident.data)) |value| {
if (value.castTag(.var_decl)) |var_decl|
return getContainer(c, var_decl.data.init.?);
if (value.castTag(.var_simple) orelse value.castTag(.pub_var_simple)) |var_decl|
return getContainer(c, var_decl.data.init);
}
},
.field_access => {
const field_access = node.castTag(.field_access).?;
if (getContainerTypeOf(c, field_access.data.lhs)) |ty_node| {
if (ty_node.castTag(.@"struct") orelse ty_node.castTag(.@"union")) |container| {
for (container.data.fields) |field| {
if (mem.eql(u8, field.name, field_access.data.field_name)) {
return getContainer(c, field.type);
}
}
}
}
},
else => {},
}
return null;
}
fn getContainerTypeOf(c: *Context, ref: Node) ?Node {
if (ref.castTag(.identifier)) |ident| {
if (c.global_scope.sym_table.get(ident.data)) |value| {
if (value.castTag(.var_decl)) |var_decl| {
return getContainer(c, var_decl.data.type);
}
}
} else if (ref.castTag(.field_access)) |field_access| {
if (getContainerTypeOf(c, field_access.data.lhs)) |ty_node| {
if (ty_node.castTag(.@"struct") orelse ty_node.castTag(.@"union")) |container| {
for (container.data.fields) |field| {
if (mem.eql(u8, field.name, field_access.data.field_name)) {
return getContainer(c, field.type);
}
}
} else return ty_node;
}
}
return null;
}
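/// If `ref` is a variable declaration whose type resolves to an optional
/// function pointer, return that function prototype; used by addMacros to
/// detect macros aliasing function pointers.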
fn getFnProto(c: *Context, ref: Node) ?*ast.Payload.Func {
const init = if (ref.castTag(.var_decl)) |v|
v.data.init orelse return null
else if (ref.castTag(.var_simple) orelse ref.castTag(.pub_var_simple)) |v|
v.data.init
else
return null;
if (getContainerTypeOf(c, init)) |ty_node| {
if (ty_node.castTag(.optional_type)) |prefix| {
if (prefix.data.castTag(.func)) |fn_proto| {
return fn_proto;
}
}
}
return null;
}
fn addMacros(c: *Context) !void {
var it = c.global_scope.macro_table.iterator();
while (it.next()) |entry| {
if (getFnProto(c, entry.value_ptr.*)) |proto_node| {
// If a macro aliases a global variable which is a function pointer, we conclude that
// the macro is intended to represent a function that assumes the function pointer
// variable is non-null and calls it.
try addTopLevelDecl(c, entry.key_ptr.*, try transCreateNodeMacroFn(c, entry.key_ptr.*, entry.value_ptr.*, proto_node));
} else {
try addTopLevelDecl(c, entry.key_ptr.*, entry.value_ptr.*);
}
}
} // src/translate_c.zig ends here; std/debug.zig follows
const std = @import("std.zig");
const builtin = std.builtin;
const math = std.math;
const mem = std.mem;
const io = std.io;
const os = std.os;
const fs = std.fs;
const process = std.process;
const elf = std.elf;
const DW = std.dwarf;
const macho = std.macho;
const coff = std.coff;
const pdb = std.pdb;
const ArrayList = std.ArrayList;
const root = @import("root");
const maxInt = std.math.maxInt;
const File = std.fs.File;
const windows = std.os.windows;
pub const runtime_safety = switch (builtin.mode) {
.Debug, .ReleaseSafe => true,
.ReleaseFast, .ReleaseSmall => false,
};
const Module = struct {
mod_info: pdb.ModInfo,
module_name: []u8,
obj_file_name: []u8,
populated: bool,
symbols: []u8,
subsect_info: []u8,
checksum_offset: ?usize,
};
pub const LineInfo = struct {
line: u64,
column: u64,
file_name: []const u8,
allocator: ?*mem.Allocator,
fn deinit(self: LineInfo) void {
const allocator = self.allocator orelse return;
allocator.free(self.file_name);
}
};
var stderr_mutex = std.Mutex{};
/// Deprecated. Use `std.log` functions for logging or `std.debug.print` for
/// "printf debugging".
pub const warn = print;
/// Print to stderr, unbuffered, and silently returning on failure. Intended
/// for use in "printf debugging." Use `std.log` functions for proper logging.
pub fn print(comptime fmt: []const u8, args: anytype) void {
const held = stderr_mutex.acquire();
defer held.release();
const stderr = io.getStdErr().writer();
nosuspend stderr.print(fmt, args) catch return;
}
pub fn getStderrMutex() *std.Mutex {
return &stderr_mutex;
}
/// TODO multithreaded awareness
var self_debug_info: ?DebugInfo = null;
pub fn getSelfDebugInfo() !*DebugInfo {
if (self_debug_info) |*info| {
return info;
} else {
self_debug_info = try openSelfDebugInfo(getDebugInfoAllocator());
return &self_debug_info.?;
}
}
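/// Choose how to colorize debug output: escape codes when ZIG_DEBUG_COLOR
/// is set or stderr supports ANSI escapes, the console API on a Windows
/// TTY, and no color otherwise.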
pub fn detectTTYConfig() TTY.Config {
var bytes: [128]u8 = undefined;
const allocator = &std.heap.FixedBufferAllocator.init(bytes[0..]).allocator;
if (process.getEnvVarOwned(allocator, "ZIG_DEBUG_COLOR")) |_| {
return .escape_codes;
} else |_| {
const stderr_file = io.getStdErr();
if (stderr_file.supportsAnsiEscapeCodes()) {
return .escape_codes;
} else if (builtin.os.tag == .windows and stderr_file.isTty()) {
return .windows_api;
} else {
return .no_color;
}
}
}
/// Tries to print the current stack trace to stderr, unbuffered, and ignores any error returned.
/// TODO multithreaded awareness
pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
nosuspend {
const stderr = io.getStdErr().writer();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
}
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", .{@errorName(err)}) catch return;
return;
};
writeCurrentStackTrace(stderr, debug_info, detectTTYConfig(), start_addr) catch |err| {
stderr.print("Unable to dump stack trace: {}\n", .{@errorName(err)}) catch return;
return;
};
}
}
/// Tries to print the stack trace starting from the supplied base pointer to stderr,
/// unbuffered, and ignores any error returned.
/// TODO multithreaded awareness
pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void {
nosuspend {
const stderr = io.getStdErr().writer();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
}
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", .{@errorName(err)}) catch return;
return;
};
const tty_config = detectTTYConfig();
printSourceAtAddress(debug_info, stderr, ip, tty_config) catch return;
var it = StackIterator.init(null, bp);
while (it.next()) |return_address| {
printSourceAtAddress(debug_info, stderr, return_address - 1, tty_config) catch return;
}
}
}
/// Returns a slice with the same pointer as addresses, with a potentially smaller len.
/// On Windows, when first_address is not null, we ask for at least 32 stack frames,
/// and then try to find the first address. If addresses.len is more than 32, we
/// capture that many stack frames exactly, and then look for the first address,
/// chopping off the irrelevant frames and shifting so that the returned addresses pointer
/// equals the passed in addresses pointer.
pub fn captureStackTrace(first_address: ?usize, stack_trace: *builtin.StackTrace) void {
if (builtin.os.tag == .windows) {
const addrs = stack_trace.instruction_addresses;
const u32_addrs_len = @intCast(u32, addrs.len);
const first_addr = first_address orelse {
stack_trace.index = windows.ntdll.RtlCaptureStackBackTrace(
0,
u32_addrs_len,
@ptrCast(**c_void, addrs.ptr),
null,
);
return;
};
var addr_buf_stack: [32]usize = undefined;
const addr_buf = if (addr_buf_stack.len > addrs.len) addr_buf_stack[0..] else addrs;
const n = windows.ntdll.RtlCaptureStackBackTrace(0, u32_addrs_len, @ptrCast(**c_void, addr_buf.ptr), null);
const first_index = for (addr_buf[0..n]) |addr, i| {
if (addr == first_addr) {
break i;
}
} else {
stack_trace.index = 0;
return;
};
const slice = addr_buf[first_index..n];
// We use a for loop here because slice and addrs may alias.
for (slice) |addr, i| {
addrs[i] = addr;
}
stack_trace.index = slice.len;
} else {
var it = StackIterator.init(first_address, null);
for (stack_trace.instruction_addresses) |*addr, i| {
addr.* = it.next() orelse {
stack_trace.index = i;
return;
};
}
stack_trace.index = stack_trace.instruction_addresses.len;
}
}
/// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned.
/// TODO multithreaded awareness
pub fn dumpStackTrace(stack_trace: builtin.StackTrace) void {
nosuspend {
const stderr = io.getStdErr().writer();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
}
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", .{@errorName(err)}) catch return;
return;
};
writeStackTrace(stack_trace, stderr, getDebugInfoAllocator(), debug_info, detectTTYConfig()) catch |err| {
stderr.print("Unable to dump stack trace: {}\n", .{@errorName(err)}) catch return;
return;
};
}
}
/// This function invokes undefined behavior when `ok` is `false`.
/// In Debug and ReleaseSafe modes, calls to this function are always
/// generated, and the `unreachable` statement triggers a panic.
/// In ReleaseFast and ReleaseSmall modes, calls to this function are
/// optimized away, and in fact the optimizer is able to use the assertion
/// in its heuristics.
/// Inside a test block, it is best to use the `std.testing` module rather
/// than this function, because this function may not detect a test failure
/// in ReleaseFast and ReleaseSmall mode. Outside of a test block, this assert
/// function is the correct function to use.
pub fn assert(ok: bool) void {
if (!ok) unreachable; // assertion failure
}
pub fn panic(comptime format: []const u8, args: anytype) noreturn {
@setCold(true);
// TODO: remove conditional once wasi / LLVM defines __builtin_return_address
const first_trace_addr = if (builtin.os.tag == .wasi) null else @returnAddress();
panicExtra(null, first_trace_addr, format, args);
}
/// Non-zero whenever the program triggered a panic.
/// The counter is incremented/decremented atomically.
var panicking: u8 = 0;
// Locked to avoid interleaving panic messages from multiple threads.
var panic_mutex = std.Mutex{};
/// Counts how many times the panic handler is invoked by this thread.
/// This is used to catch and handle panics triggered by the panic handler.
threadlocal var panic_stage: usize = 0;
pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: anytype) noreturn {
@setCold(true);
if (enable_segfault_handler) {
// If a segfault happens while panicking, we want it to actually segfault, not trigger
// the handler.
resetSegfaultHandler();
}
nosuspend switch (panic_stage) {
0 => {
panic_stage = 1;
_ = @atomicRmw(u8, &panicking, .Add, 1, .SeqCst);
// Make sure to release the mutex when done
{
const held = panic_mutex.acquire();
defer held.release();
const stderr = io.getStdErr().writer();
stderr.print(format ++ "\n", args) catch os.abort();
if (trace) |t| {
dumpStackTrace(t.*);
}
dumpCurrentStackTrace(first_trace_addr);
}
if (@atomicRmw(u8, &panicking, .Sub, 1, .SeqCst) != 1) {
// Another thread is panicking, wait for the last one to finish
// and call abort()
// Sleep forever without hammering the CPU
var event = std.ResetEvent.init();
event.wait();
unreachable;
}
},
1 => {
panic_stage = 2;
// A panic happened while trying to print a previous panic message,
// we're still holding the mutex but that's fine as we're going to
// call abort()
const stderr = io.getStdErr().writer();
stderr.print("Panicked during a panic. Aborting.\n", .{}) catch os.abort();
},
else => {
// Panicked while printing "Panicked during a panic."
},
};
os.abort();
}
const RED = "\x1b[31;1m";
const GREEN = "\x1b[32;1m";
const CYAN = "\x1b[36;1m";
const WHITE = "\x1b[37;1m";
const DIM = "\x1b[2m";
const RESET = "\x1b[0m";
pub fn writeStackTrace(
stack_trace: builtin.StackTrace,
out_stream: anytype,
allocator: *mem.Allocator,
debug_info: *DebugInfo,
tty_config: TTY.Config,
) !void {
if (builtin.strip_debug_info) return error.MissingDebugInfo;
var frame_index: usize = 0;
var frames_left: usize = std.math.min(stack_trace.index, stack_trace.instruction_addresses.len);
while (frames_left != 0) : ({
frames_left -= 1;
frame_index = (frame_index + 1) % stack_trace.instruction_addresses.len;
}) {
const return_address = stack_trace.instruction_addresses[frame_index];
try printSourceAtAddress(debug_info, out_stream, return_address - 1, tty_config);
}
}
pub const StackIterator = struct {
// Skip every frame before this address is found.
first_address: ?usize,
// Last known value of the frame pointer register.
fp: usize,
pub fn init(first_address: ?usize, fp: ?usize) StackIterator {
return StackIterator{
.first_address = first_address,
.fp = fp orelse @frameAddress(),
};
}
// Negative offset of the saved BP wrt the frame pointer.
const fp_offset = if (builtin.arch.isRISCV())
// On RISC-V the frame pointer points to the top of the saved register
// area, on pretty much every other architecture it points to the stack
// slot where the previous frame pointer is saved.
2 * @sizeOf(usize)
else
0;
// Positive offset of the saved PC wrt the frame pointer.
const pc_offset = if (builtin.arch == .powerpc64le)
2 * @sizeOf(usize)
else
@sizeOf(usize);
pub fn next(self: *StackIterator) ?usize {
var address = self.next_internal() orelse return null;
if (self.first_address) |first_address| {
while (address != first_address) {
address = self.next_internal() orelse return null;
}
self.first_address = null;
}
return address;
}
fn next_internal(self: *StackIterator) ?usize {
const fp = math.sub(usize, self.fp, fp_offset) catch return null;
// Sanity check.
if (fp == 0 or !mem.isAligned(fp, @alignOf(usize)))
return null;
const new_fp = @intToPtr(*const usize, fp).*;
// Sanity check: the stack grows down, thus all the parent frames must be
// at addresses that are greater than (or equal to) the previous one.
// A zero frame pointer often signals this is the last frame, that case
// is gracefully handled by the next call to next_internal.
if (new_fp != 0 and new_fp < self.fp)
return null;
const new_pc = @intToPtr(
*const usize,
math.add(usize, fp, pc_offset) catch return null,
).*;
self.fp = new_fp;
return new_pc;
}
};
pub fn writeCurrentStackTrace(
out_stream: anytype,
debug_info: *DebugInfo,
tty_config: TTY.Config,
start_addr: ?usize,
) !void {
if (builtin.os.tag == .windows) {
return writeCurrentStackTraceWindows(out_stream, debug_info, tty_config, start_addr);
}
var it = StackIterator.init(start_addr, null);
while (it.next()) |return_address| {
try printSourceAtAddress(debug_info, out_stream, return_address - 1, tty_config);
}
}
pub fn writeCurrentStackTraceWindows(
out_stream: anytype,
debug_info: *DebugInfo,
tty_config: TTY.Config,
start_addr: ?usize,
) !void {
var addr_buf: [1024]usize = undefined;
const n = windows.ntdll.RtlCaptureStackBackTrace(0, addr_buf.len, @ptrCast(**c_void, &addr_buf), null);
const addrs = addr_buf[0..n];
var start_i: usize = if (start_addr) |saddr| blk: {
for (addrs) |addr, i| {
if (addr == saddr) break :blk i;
}
return;
} else 0;
for (addrs[start_i..]) |addr| {
try printSourceAtAddress(debug_info, out_stream, addr - 1, tty_config);
}
}
pub const TTY = struct {
pub const Color = enum {
Red,
Green,
Cyan,
White,
Dim,
Bold,
Reset,
};
pub const Config = enum {
no_color,
escape_codes,
// TODO give this a payload of file handle
windows_api,
fn setColor(conf: Config, out_stream: anytype, color: Color) void {
nosuspend switch (conf) {
.no_color => return,
.escape_codes => switch (color) {
.Red => out_stream.writeAll(RED) catch return,
.Green => out_stream.writeAll(GREEN) catch return,
.Cyan => out_stream.writeAll(CYAN) catch return,
.White, .Bold => out_stream.writeAll(WHITE) catch return,
.Dim => out_stream.writeAll(DIM) catch return,
.Reset => out_stream.writeAll(RESET) catch return,
},
.windows_api => if (builtin.os.tag == .windows) {
const stderr_file = io.getStdErr();
const S = struct {
var attrs: windows.WORD = undefined;
var init_attrs = false;
};
if (!S.init_attrs) {
S.init_attrs = true;
var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;
// TODO handle error
_ = windows.kernel32.GetConsoleScreenBufferInfo(stderr_file.handle, &info);
S.attrs = info.wAttributes;
}
// TODO handle errors
switch (color) {
.Red => {
_ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_RED | windows.FOREGROUND_INTENSITY) catch {};
},
.Green => {
_ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY) catch {};
},
.Cyan => {
_ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY) catch {};
},
.White, .Bold => {
_ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY) catch {};
},
.Dim => {
_ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_INTENSITY) catch {};
},
.Reset => {
_ = windows.SetConsoleTextAttribute(stderr_file.handle, S.attrs) catch {};
},
}
} else {
unreachable;
},
};
}
};
};
/// TODO resources https://github.com/ziglang/zig/issues/4353
fn populateModule(di: *ModuleDebugInfo, mod: *Module) !void {
if (mod.populated)
return;
const allocator = getDebugInfoAllocator();
// At most one can be non-zero.
if (mod.mod_info.C11ByteSize != 0 and mod.mod_info.C13ByteSize != 0)
return error.InvalidDebugInfo;
if (mod.mod_info.C13ByteSize == 0)
return;
const modi = di.pdb.getStreamById(mod.mod_info.ModuleSymStream) orelse return error.MissingDebugInfo;
const signature = try modi.inStream().readIntLittle(u32);
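// 4 == CV_SIGNATURE_C13, the only module symbol stream format supported here.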
if (signature != 4)
return error.InvalidDebugInfo;
mod.symbols = try allocator.alloc(u8, mod.mod_info.SymByteSize - 4);
try modi.inStream().readNoEof(mod.symbols);
mod.subsect_info = try allocator.alloc(u8, mod.mod_info.C13ByteSize);
try modi.inStream().readNoEof(mod.subsect_info);
var sect_offset: usize = 0;
var skip_len: usize = undefined;
while (sect_offset != mod.subsect_info.len) : (sect_offset += skip_len) {
const subsect_hdr = @ptrCast(*pdb.DebugSubsectionHeader, &mod.subsect_info[sect_offset]);
skip_len = subsect_hdr.Length;
sect_offset += @sizeOf(pdb.DebugSubsectionHeader);
switch (subsect_hdr.Kind) {
.FileChecksums => {
mod.checksum_offset = sect_offset;
break;
},
else => {},
}
if (sect_offset > mod.subsect_info.len)
return error.InvalidDebugInfo;
}
mod.populated = true;
}
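/// Binary search for the symbol covering `address`. `symbols` must be
/// sorted by address and terminated by a sentinel entry, so each symbol's
/// extent runs up to the start of the next one.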
fn machoSearchSymbols(symbols: []const MachoSymbol, address: usize) ?*const MachoSymbol {
var min: usize = 0;
var max: usize = symbols.len - 1; // Exclude sentinel.
while (min < max) {
const mid = min + (max - min) / 2;
const curr = &symbols[mid];
const next = &symbols[mid + 1];
if (address >= next.address()) {
min = mid + 1;
} else if (address < curr.address()) {
max = mid;
} else {
return curr;
}
}
return null;
}
/// TODO resources https://github.com/ziglang/zig/issues/4353
pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: TTY.Config) !void {
const module = debug_info.getModuleForAddress(address) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => {
return printLineInfo(
out_stream,
null,
address,
"???",
"???",
tty_config,
printLineFromFileAnyOs,
);
},
else => return err,
};
const symbol_info = try module.getSymbolAtAddress(address);
defer symbol_info.deinit();
return printLineInfo(
out_stream,
symbol_info.line_info,
address,
symbol_info.symbol_name,
symbol_info.compile_unit_name,
tty_config,
printLineFromFileAnyOs,
);
}
fn printLineInfo(
out_stream: anytype,
line_info: ?LineInfo,
address: usize,
symbol_name: []const u8,
compile_unit_name: []const u8,
tty_config: TTY.Config,
comptime printLineFromFile: anytype,
) !void {
nosuspend {
tty_config.setColor(out_stream, .White);
if (line_info) |*li| {
try out_stream.print("{}:{}:{}", .{ li.file_name, li.line, li.column });
} else {
try out_stream.writeAll("???:?:?");
}
tty_config.setColor(out_stream, .Reset);
try out_stream.writeAll(": ");
tty_config.setColor(out_stream, .Dim);
try out_stream.print("0x{x} in {} ({})", .{ address, symbol_name, compile_unit_name });
tty_config.setColor(out_stream, .Reset);
try out_stream.writeAll("\n");
// Show the matching source code line if possible
if (line_info) |li| {
if (printLineFromFile(out_stream, li)) {
if (li.column > 0) {
// The caret already takes one char
const space_needed = @intCast(usize, li.column - 1);
try out_stream.writeByteNTimes(' ', space_needed);
tty_config.setColor(out_stream, .Green);
try out_stream.writeAll("^");
tty_config.setColor(out_stream, .Reset);
}
try out_stream.writeAll("\n");
} else |err| switch (err) {
error.EndOfFile, error.FileNotFound => {},
error.BadPathName => {},
else => return err,
}
}
}
}
// TODO use this
pub const OpenSelfDebugInfoError = error{
MissingDebugInfo,
OutOfMemory,
UnsupportedOperatingSystem,
};
/// TODO resources https://github.com/ziglang/zig/issues/4353
pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
nosuspend {
if (builtin.strip_debug_info)
return error.MissingDebugInfo;
if (@hasDecl(root, "os") and @hasDecl(root.os, "debug") and @hasDecl(root.os.debug, "openSelfDebugInfo")) {
return root.os.debug.openSelfDebugInfo(allocator);
}
switch (builtin.os.tag) {
.linux,
.freebsd,
.netbsd,
.dragonfly,
.openbsd,
.macos,
.windows,
=> return DebugInfo.init(allocator),
else => return error.UnsupportedDebugInfo,
}
}
}
/// This takes ownership of coff_file: users of this function should not close
/// it themselves, even on error.
/// TODO resources https://github.com/ziglang/zig/issues/4353
/// TODO it's weird to take ownership even on error, rework this code.
fn readCoffDebugInfo(allocator: *mem.Allocator, coff_file: File) !ModuleDebugInfo {
nosuspend {
errdefer coff_file.close();
const coff_obj = try allocator.create(coff.Coff);
coff_obj.* = coff.Coff.init(allocator, coff_file);
var di = ModuleDebugInfo{
.base_address = undefined,
.coff = coff_obj,
.pdb = undefined,
.sect_contribs = undefined,
.modules = undefined,
};
try di.coff.loadHeader();
var path_buf: [windows.MAX_PATH]u8 = undefined;
const len = try di.coff.getPdbPath(path_buf[0..]);
const raw_path = path_buf[0..len];
const path = try fs.path.resolve(allocator, &[_][]const u8{raw_path});
try di.pdb.openFile(di.coff, path);
var pdb_stream = di.pdb.getStream(pdb.StreamType.Pdb) orelse return error.InvalidDebugInfo;
const version = try pdb_stream.inStream().readIntLittle(u32);
const signature = try pdb_stream.inStream().readIntLittle(u32);
const age = try pdb_stream.inStream().readIntLittle(u32);
var guid: [16]u8 = undefined;
try pdb_stream.inStream().readNoEof(&guid);
if (version != 20000404) // VC70, only value observed by LLVM team
return error.UnknownPDBVersion;
if (!mem.eql(u8, &di.coff.guid, &guid) or di.coff.age != age)
return error.PDBMismatch;
// We validated the executable and pdb match.
const string_table_index = str_tab_index: {
const name_bytes_len = try pdb_stream.inStream().readIntLittle(u32);
const name_bytes = try allocator.alloc(u8, name_bytes_len);
try pdb_stream.inStream().readNoEof(name_bytes);
const HashTableHeader = packed struct {
Size: u32,
Capacity: u32,
fn maxLoad(cap: u32) u32 {
return cap * 2 / 3 + 1;
}
};
const hash_tbl_hdr = try pdb_stream.inStream().readStruct(HashTableHeader);
if (hash_tbl_hdr.Capacity == 0)
return error.InvalidDebugInfo;
if (hash_tbl_hdr.Size > HashTableHeader.maxLoad(hash_tbl_hdr.Capacity))
return error.InvalidDebugInfo;
const present = try readSparseBitVector(&pdb_stream.inStream(), allocator);
if (present.len != hash_tbl_hdr.Size)
return error.InvalidDebugInfo;
const deleted = try readSparseBitVector(&pdb_stream.inStream(), allocator);
const Bucket = struct {
first: u32,
second: u32,
};
const bucket_list = try allocator.alloc(Bucket, present.len);
for (present) |_| {
const name_offset = try pdb_stream.inStream().readIntLittle(u32);
const name_index = try pdb_stream.inStream().readIntLittle(u32);
const name = mem.spanZ(std.meta.assumeSentinel(name_bytes.ptr + name_offset, 0));
if (mem.eql(u8, name, "/names")) {
break :str_tab_index name_index;
}
}
return error.MissingDebugInfo;
};
di.pdb.string_table = di.pdb.getStreamById(string_table_index) orelse return error.MissingDebugInfo;
di.pdb.dbi = di.pdb.getStream(pdb.StreamType.Dbi) orelse return error.MissingDebugInfo;
const dbi = di.pdb.dbi;
// Dbi Header
const dbi_stream_header = try dbi.inStream().readStruct(pdb.DbiStreamHeader);
if (dbi_stream_header.VersionHeader != 19990903) // V70, only value observed by LLVM team
return error.UnknownPDBVersion;
if (dbi_stream_header.Age != age)
return error.UnmatchingPDB;
const mod_info_size = dbi_stream_header.ModInfoSize;
const section_contrib_size = dbi_stream_header.SectionContributionSize;
var modules = ArrayList(Module).init(allocator);
// Module Info Substream
var mod_info_offset: usize = 0;
while (mod_info_offset != mod_info_size) {
const mod_info = try dbi.inStream().readStruct(pdb.ModInfo);
var this_record_len: usize = @sizeOf(pdb.ModInfo);
const module_name = try dbi.readNullTermString(allocator);
this_record_len += module_name.len + 1;
const obj_file_name = try dbi.readNullTermString(allocator);
this_record_len += obj_file_name.len + 1;
if (this_record_len % 4 != 0) {
const round_to_next_4 = (this_record_len | 0x3) + 1;
const march_forward_bytes = round_to_next_4 - this_record_len;
try dbi.seekBy(@intCast(isize, march_forward_bytes));
this_record_len += march_forward_bytes;
}
try modules.append(Module{
.mod_info = mod_info,
.module_name = module_name,
.obj_file_name = obj_file_name,
.populated = false,
.symbols = undefined,
.subsect_info = undefined,
.checksum_offset = null,
});
mod_info_offset += this_record_len;
if (mod_info_offset > mod_info_size)
return error.InvalidDebugInfo;
}
di.modules = modules.toOwnedSlice();
// Section Contribution Substream
var sect_contribs = ArrayList(pdb.SectionContribEntry).init(allocator);
var sect_cont_offset: usize = 0;
if (section_contrib_size != 0) {
const ver = @intToEnum(pdb.SectionContrSubstreamVersion, try dbi.inStream().readIntLittle(u32));
if (ver != pdb.SectionContrSubstreamVersion.Ver60)
return error.InvalidDebugInfo;
sect_cont_offset += @sizeOf(u32);
}
while (sect_cont_offset != section_contrib_size) {
const entry = try sect_contribs.addOne();
entry.* = try dbi.inStream().readStruct(pdb.SectionContribEntry);
sect_cont_offset += @sizeOf(pdb.SectionContribEntry);
if (sect_cont_offset > section_contrib_size)
return error.InvalidDebugInfo;
}
di.sect_contribs = sect_contribs.toOwnedSlice();
return di;
}
}
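/// Read a PDB serialized sparse bit vector: a u32 word count followed by
/// that many u32 words; returns the indices of all set bits.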
fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]usize {
const num_words = try stream.readIntLittle(u32);
var word_i: usize = 0;
var list = ArrayList(usize).init(allocator);
while (word_i != num_words) : (word_i += 1) {
const word = try stream.readIntLittle(u32);
var bit_i: u5 = 0;
while (true) : (bit_i += 1) {
if (word & (@as(u32, 1) << bit_i) != 0) {
try list.append(word_i * 32 + bit_i);
}
if (bit_i == maxInt(u5)) break;
}
}
return list.toOwnedSlice();
}
fn chopSlice(ptr: []const u8, offset: u64, size: u64) ![]const u8 {
const start = try math.cast(usize, offset);
const end = start + try math.cast(usize, size);
return ptr[start..end];
}
/// This takes ownership of elf_file: users of this function should not close
/// it themselves, even on error.
/// TODO resources https://github.com/ziglang/zig/issues/4353
/// TODO it's weird to take ownership even on error, rework this code.
pub fn readElfDebugInfo(allocator: *mem.Allocator, elf_file: File) !ModuleDebugInfo {
nosuspend {
const mapped_mem = try mapWholeFile(elf_file);
const hdr = @ptrCast(*const elf.Ehdr, &mapped_mem[0]);
if (!mem.eql(u8, hdr.e_ident[0..4], "\x7fELF")) return error.InvalidElfMagic;
if (hdr.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion;
const endian: builtin.Endian = switch (hdr.e_ident[elf.EI_DATA]) {
elf.ELFDATA2LSB => .Little,
elf.ELFDATA2MSB => .Big,
else => return error.InvalidElfEndian,
};
assert(endian == std.builtin.endian); // this is our own debug info
const shoff = hdr.e_shoff;
const str_section_off = shoff + @as(u64, hdr.e_shentsize) * @as(u64, hdr.e_shstrndx);
const str_shdr = @ptrCast(
*const elf.Shdr,
@alignCast(@alignOf(elf.Shdr), &mapped_mem[try math.cast(usize, str_section_off)]),
);
const header_strings = mapped_mem[str_shdr.sh_offset .. str_shdr.sh_offset + str_shdr.sh_size];
const shdrs = @ptrCast(
[*]const elf.Shdr,
@alignCast(@alignOf(elf.Shdr), &mapped_mem[shoff]),
)[0..hdr.e_shnum];
var opt_debug_info: ?[]const u8 = null;
var opt_debug_abbrev: ?[]const u8 = null;
var opt_debug_str: ?[]const u8 = null;
var opt_debug_line: ?[]const u8 = null;
var opt_debug_ranges: ?[]const u8 = null;
for (shdrs) |*shdr| {
if (shdr.sh_type == elf.SHT_NULL) continue;
const name = std.mem.span(std.meta.assumeSentinel(header_strings[shdr.sh_name..].ptr, 0));
if (mem.eql(u8, name, ".debug_info")) {
opt_debug_info = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
} else if (mem.eql(u8, name, ".debug_abbrev")) {
opt_debug_abbrev = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
} else if (mem.eql(u8, name, ".debug_str")) {
opt_debug_str = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
} else if (mem.eql(u8, name, ".debug_line")) {
opt_debug_line = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
} else if (mem.eql(u8, name, ".debug_ranges")) {
opt_debug_ranges = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
}
}
var di = DW.DwarfInfo{
.endian = endian,
.debug_info = opt_debug_info orelse return error.MissingDebugInfo,
.debug_abbrev = opt_debug_abbrev orelse return error.MissingDebugInfo,
.debug_str = opt_debug_str orelse return error.MissingDebugInfo,
.debug_line = opt_debug_line orelse return error.MissingDebugInfo,
.debug_ranges = opt_debug_ranges,
};
try DW.openDwarfDebugInfo(&di, allocator);
return ModuleDebugInfo{
.base_address = undefined,
.dwarf = di,
.mapped_memory = mapped_mem,
};
}
}
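// Usage sketch (illustrative only; the path and variable names are hypothetical):
// the File is consumed, so the caller must not close it, even when an error is
// returned.
//
//     const elf_file = try fs.cwd().openFile("zig-out/bin/app", .{});
//     var mod_di = try readElfDebugInfo(allocator, elf_file); // no `defer elf_file.close()`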
/// TODO resources https://github.com/ziglang/zig/issues/4353
/// This takes ownership of coff_file: users of this function should not close
/// it themselves, even on error.
/// TODO it's weird to take ownership even on error, rework this code.
fn readMachODebugInfo(allocator: *mem.Allocator, macho_file: File) !ModuleDebugInfo {
const mapped_mem = try mapWholeFile(macho_file);
const hdr = @ptrCast(
*const macho.mach_header_64,
@alignCast(@alignOf(macho.mach_header_64), mapped_mem.ptr),
);
if (hdr.magic != macho.MH_MAGIC_64)
return error.InvalidDebugInfo;
const hdr_base = @ptrCast([*]const u8, hdr);
var ptr = hdr_base + @sizeOf(macho.mach_header_64);
var ncmd: u32 = hdr.ncmds;
const symtab = while (ncmd != 0) : (ncmd -= 1) {
const lc = @ptrCast(*const std.macho.load_command, ptr);
switch (lc.cmd) {
std.macho.LC_SYMTAB => break @ptrCast(*const std.macho.symtab_command, ptr),
else => {},
}
ptr = @alignCast(@alignOf(std.macho.load_command), ptr + lc.cmdsize);
} else {
return error.MissingDebugInfo;
};
const syms = @ptrCast([*]const macho.nlist_64, @alignCast(@alignOf(macho.nlist_64), hdr_base + symtab.symoff))[0..symtab.nsyms];
const strings = @ptrCast([*]const u8, hdr_base + symtab.stroff)[0 .. symtab.strsize - 1 :0];
const symbols_buf = try allocator.alloc(MachoSymbol, syms.len);
var ofile: ?*const macho.nlist_64 = null;
var reloc: u64 = 0;
var symbol_index: usize = 0;
var last_len: u64 = 0;
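    // A sketch of the STAB layout as this loop consumes it: N_OSO names the
    // current .o file and resets the relocation base, N_BNSYM records the
    // relocation base for the following functions, and N_FUN comes in pairs:
    // first the function's start address, then an entry whose n_value holds
    // the function's length.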
for (syms) |*sym| {
if (sym.n_type & std.macho.N_STAB != 0) {
switch (sym.n_type) {
std.macho.N_OSO => {
ofile = sym;
reloc = 0;
},
std.macho.N_FUN => {
if (sym.n_sect == 0) {
last_len = sym.n_value;
} else {
symbols_buf[symbol_index] = MachoSymbol{
.nlist = sym,
.ofile = ofile,
.reloc = reloc,
};
symbol_index += 1;
}
},
std.macho.N_BNSYM => {
if (reloc == 0) {
reloc = sym.n_value;
}
},
else => continue,
}
}
}
const sentinel = try allocator.create(macho.nlist_64);
sentinel.* = macho.nlist_64{
.n_strx = 0,
        .n_type = 36, // N_FUN
.n_sect = 0,
.n_desc = 0,
.n_value = symbols_buf[symbol_index - 1].nlist.n_value + last_len,
};
const symbols = allocator.shrink(symbols_buf, symbol_index);
// Even though lld emits symbols in ascending order, this debug code
// should work for programs linked in any valid way.
// This sort is so that we can binary search later.
std.sort.sort(MachoSymbol, symbols, {}, MachoSymbol.addressLessThan);
return ModuleDebugInfo{
.base_address = undefined,
.mapped_memory = mapped_mem,
.ofiles = ModuleDebugInfo.OFileTable.init(allocator),
.symbols = symbols,
.strings = strings,
};
}
fn printLineFromFileAnyOs(out_stream: anytype, line_info: LineInfo) !void {
    // Need this to always block, even in async I/O mode, because this could
    // potentially be called from, e.g., the event loop code while it is crashing.
var f = try fs.cwd().openFile(line_info.file_name, .{ .intended_io_mode = .blocking });
defer f.close();
// TODO fstat and make sure that the file has the correct size
var buf: [mem.page_size]u8 = undefined;
var line: usize = 1;
var column: usize = 1;
var abs_index: usize = 0;
while (true) {
const amt_read = try f.read(buf[0..]);
const slice = buf[0..amt_read];
for (slice) |byte| {
if (line == line_info.line) {
try out_stream.writeByte(byte);
if (byte == '\n') {
return;
}
}
if (byte == '\n') {
line += 1;
column = 1;
} else {
column += 1;
}
}
if (amt_read < buf.len) return error.EndOfFile;
}
}
const MachoSymbol = struct {
nlist: *const macho.nlist_64,
ofile: ?*const macho.nlist_64,
reloc: u64,
/// Returns the address from the macho file
fn address(self: MachoSymbol) u64 {
return self.nlist.n_value;
}
fn addressLessThan(context: void, lhs: MachoSymbol, rhs: MachoSymbol) bool {
return lhs.address() < rhs.address();
}
};
/// `file` is expected to have been opened with .intended_io_mode == .blocking.
/// Takes ownership of file, even on error.
/// TODO it's weird to take ownership even on error, rework this code.
fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 {
nosuspend {
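        // Closing the fd immediately is safe: an established mmap mapping
        // remains valid after its file descriptor is closed.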
defer file.close();
const file_len = try math.cast(usize, try file.getEndPos());
const mapped_mem = try os.mmap(
null,
file_len,
os.PROT_READ,
os.MAP_SHARED,
file.handle,
0,
);
errdefer os.munmap(mapped_mem);
return mapped_mem;
}
}
pub const DebugInfo = struct {
allocator: *mem.Allocator,
address_map: std.AutoHashMap(usize, *ModuleDebugInfo),
pub fn init(allocator: *mem.Allocator) DebugInfo {
return DebugInfo{
.allocator = allocator,
.address_map = std.AutoHashMap(usize, *ModuleDebugInfo).init(allocator),
};
}
pub fn deinit(self: *DebugInfo) void {
// TODO: resources https://github.com/ziglang/zig/issues/4353
self.address_map.deinit();
}
pub fn getModuleForAddress(self: *DebugInfo, address: usize) !*ModuleDebugInfo {
if (comptime std.Target.current.isDarwin())
return self.lookupModuleDyld(address)
else if (builtin.os.tag == .windows)
return self.lookupModuleWin32(address)
else
return self.lookupModuleDl(address);
}
fn lookupModuleDyld(self: *DebugInfo, address: usize) !*ModuleDebugInfo {
const image_count = std.c._dyld_image_count();
var i: u32 = 0;
while (i < image_count) : (i += 1) {
const base_address = std.c._dyld_get_image_vmaddr_slide(i);
if (address < base_address) continue;
const header = std.c._dyld_get_image_header(i) orelse continue;
// The array of load commands is right after the header
var cmd_ptr = @intToPtr([*]u8, @ptrToInt(header) + @sizeOf(macho.mach_header_64));
var cmds = header.ncmds;
while (cmds != 0) : (cmds -= 1) {
const lc = @ptrCast(
*macho.load_command,
@alignCast(@alignOf(macho.load_command), cmd_ptr),
);
cmd_ptr += lc.cmdsize;
if (lc.cmd != macho.LC_SEGMENT_64) continue;
const segment_cmd = @ptrCast(
*const std.macho.segment_command_64,
@alignCast(@alignOf(std.macho.segment_command_64), lc),
);
const rebased_address = address - base_address;
const seg_start = segment_cmd.vmaddr;
const seg_end = seg_start + segment_cmd.vmsize;
if (rebased_address >= seg_start and rebased_address < seg_end) {
if (self.address_map.get(base_address)) |obj_di| {
return obj_di;
}
const obj_di = try self.allocator.create(ModuleDebugInfo);
errdefer self.allocator.destroy(obj_di);
const macho_path = mem.spanZ(std.c._dyld_get_image_name(i));
const macho_file = fs.cwd().openFile(macho_path, .{ .intended_io_mode = .blocking }) catch |err| switch (err) {
error.FileNotFound => return error.MissingDebugInfo,
else => return err,
};
obj_di.* = try readMachODebugInfo(self.allocator, macho_file);
obj_di.base_address = base_address;
try self.address_map.putNoClobber(base_address, obj_di);
return obj_di;
}
}
}
return error.MissingDebugInfo;
}
fn lookupModuleWin32(self: *DebugInfo, address: usize) !*ModuleDebugInfo {
const process_handle = windows.kernel32.GetCurrentProcess();
// Find how many modules are actually loaded
var dummy: windows.HMODULE = undefined;
var bytes_needed: windows.DWORD = undefined;
if (windows.kernel32.K32EnumProcessModules(
process_handle,
@ptrCast([*]windows.HMODULE, &dummy),
0,
&bytes_needed,
) == 0)
return error.MissingDebugInfo;
const needed_modules = bytes_needed / @sizeOf(windows.HMODULE);
// Fetch the complete module list
var modules = try self.allocator.alloc(windows.HMODULE, needed_modules);
defer self.allocator.free(modules);
if (windows.kernel32.K32EnumProcessModules(
process_handle,
modules.ptr,
try math.cast(windows.DWORD, modules.len * @sizeOf(windows.HMODULE)),
&bytes_needed,
) == 0)
return error.MissingDebugInfo;
        // There's an unavoidable TOCTOU problem here: the module list may have
        // changed between the two EnumProcessModules calls.
        // Pick the smaller element count to avoid processing garbage.
const needed_modules_after = bytes_needed / @sizeOf(windows.HMODULE);
const loaded_modules = math.min(needed_modules, needed_modules_after);
for (modules[0..loaded_modules]) |module| {
var info: windows.MODULEINFO = undefined;
if (windows.kernel32.K32GetModuleInformation(
process_handle,
module,
&info,
@sizeOf(@TypeOf(info)),
) == 0)
return error.MissingDebugInfo;
const seg_start = @ptrToInt(info.lpBaseOfDll);
const seg_end = seg_start + info.SizeOfImage;
if (address >= seg_start and address < seg_end) {
if (self.address_map.get(seg_start)) |obj_di| {
return obj_di;
}
var name_buffer: [windows.PATH_MAX_WIDE + 4:0]u16 = undefined;
// openFileAbsoluteW requires the prefix to be present
mem.copy(u16, name_buffer[0..4], &[_]u16{ '\\', '?', '?', '\\' });
const len = windows.kernel32.K32GetModuleFileNameExW(
process_handle,
module,
@ptrCast(windows.LPWSTR, &name_buffer[4]),
windows.PATH_MAX_WIDE,
);
assert(len > 0);
const obj_di = try self.allocator.create(ModuleDebugInfo);
errdefer self.allocator.destroy(obj_di);
const coff_file = fs.openFileAbsoluteW(name_buffer[0 .. len + 4 :0], .{}) catch |err| switch (err) {
error.FileNotFound => return error.MissingDebugInfo,
else => return err,
};
obj_di.* = try readCoffDebugInfo(self.allocator, coff_file);
obj_di.base_address = seg_start;
try self.address_map.putNoClobber(seg_start, obj_di);
return obj_di;
}
}
return error.MissingDebugInfo;
}
fn lookupModuleDl(self: *DebugInfo, address: usize) !*ModuleDebugInfo {
var ctx: struct {
// Input
address: usize,
// Output
base_address: usize = undefined,
name: []const u8 = undefined,
} = .{ .address = address };
const CtxTy = @TypeOf(ctx);
if (os.dl_iterate_phdr(&ctx, anyerror, struct {
fn callback(info: *os.dl_phdr_info, size: usize, context: *CtxTy) !void {
// The base address is too high
if (context.address < info.dlpi_addr)
return;
const phdrs = info.dlpi_phdr[0..info.dlpi_phnum];
for (phdrs) |*phdr| {
if (phdr.p_type != elf.PT_LOAD) continue;
const seg_start = info.dlpi_addr + phdr.p_vaddr;
const seg_end = seg_start + phdr.p_memsz;
if (context.address >= seg_start and context.address < seg_end) {
// Android libc uses NULL instead of an empty string to mark the
// main program
context.name = mem.spanZ(info.dlpi_name) orelse "";
context.base_address = info.dlpi_addr;
// Stop the iteration
return error.Found;
}
}
}
}.callback)) {
return error.MissingDebugInfo;
} else |err| switch (err) {
error.Found => {},
else => return error.MissingDebugInfo,
}
if (self.address_map.get(ctx.base_address)) |obj_di| {
return obj_di;
}
const obj_di = try self.allocator.create(ModuleDebugInfo);
errdefer self.allocator.destroy(obj_di);
// TODO https://github.com/ziglang/zig/issues/5525
const copy = if (ctx.name.len > 0)
fs.cwd().openFile(ctx.name, .{ .intended_io_mode = .blocking })
else
fs.openSelfExe(.{ .intended_io_mode = .blocking });
const elf_file = copy catch |err| switch (err) {
error.FileNotFound => return error.MissingDebugInfo,
else => return err,
};
obj_di.* = try readElfDebugInfo(self.allocator, elf_file);
obj_di.base_address = ctx.base_address;
try self.address_map.putNoClobber(ctx.base_address, obj_di);
return obj_di;
}
};
const SymbolInfo = struct {
symbol_name: []const u8 = "???",
compile_unit_name: []const u8 = "???",
line_info: ?LineInfo = null,
fn deinit(self: @This()) void {
if (self.line_info) |li| {
li.deinit();
}
}
};
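// Typical flow (a sketch, not spelled out in this file): a stack-trace printer
// maps a return address to a module with DebugInfo.getModuleForAddress(), asks
// that module for a SymbolInfo with getSymbolAtAddress(), and calls
// SymbolInfo.deinit() once the frame has been printed.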
pub const ModuleDebugInfo = switch (builtin.os.tag) {
.macos, .ios, .watchos, .tvos => struct {
base_address: usize,
mapped_memory: []const u8,
symbols: []const MachoSymbol,
strings: [:0]const u8,
ofiles: OFileTable,
const OFileTable = std.StringHashMap(DW.DwarfInfo);
pub fn allocator(self: @This()) *mem.Allocator {
return self.ofiles.allocator;
}
fn loadOFile(self: *@This(), o_file_path: []const u8) !DW.DwarfInfo {
const o_file = try fs.cwd().openFile(o_file_path, .{ .intended_io_mode = .blocking });
const mapped_mem = try mapWholeFile(o_file);
const hdr = @ptrCast(
*const macho.mach_header_64,
@alignCast(@alignOf(macho.mach_header_64), mapped_mem.ptr),
);
if (hdr.magic != std.macho.MH_MAGIC_64)
return error.InvalidDebugInfo;
const hdr_base = @ptrCast([*]const u8, hdr);
var ptr = hdr_base + @sizeOf(macho.mach_header_64);
var ncmd: u32 = hdr.ncmds;
const segcmd = while (ncmd != 0) : (ncmd -= 1) {
const lc = @ptrCast(*const std.macho.load_command, ptr);
switch (lc.cmd) {
std.macho.LC_SEGMENT_64 => {
break @ptrCast(
*const std.macho.segment_command_64,
@alignCast(@alignOf(std.macho.segment_command_64), ptr),
);
},
else => {},
}
ptr = @alignCast(@alignOf(std.macho.load_command), ptr + lc.cmdsize);
} else {
return error.MissingDebugInfo;
};
var opt_debug_line: ?*const macho.section_64 = null;
var opt_debug_info: ?*const macho.section_64 = null;
var opt_debug_abbrev: ?*const macho.section_64 = null;
var opt_debug_str: ?*const macho.section_64 = null;
var opt_debug_ranges: ?*const macho.section_64 = null;
const sections = @ptrCast(
[*]const macho.section_64,
@alignCast(@alignOf(macho.section_64), ptr + @sizeOf(std.macho.segment_command_64)),
)[0..segcmd.nsects];
for (sections) |*sect| {
// The section name may not exceed 16 chars and a trailing null may
// not be present
const name = if (mem.indexOfScalar(u8, sect.sectname[0..], 0)) |last|
sect.sectname[0..last]
else
sect.sectname[0..];
if (mem.eql(u8, name, "__debug_line")) {
opt_debug_line = sect;
} else if (mem.eql(u8, name, "__debug_info")) {
opt_debug_info = sect;
} else if (mem.eql(u8, name, "__debug_abbrev")) {
opt_debug_abbrev = sect;
} else if (mem.eql(u8, name, "__debug_str")) {
opt_debug_str = sect;
} else if (mem.eql(u8, name, "__debug_ranges")) {
opt_debug_ranges = sect;
}
}
const debug_line = opt_debug_line orelse
return error.MissingDebugInfo;
const debug_info = opt_debug_info orelse
return error.MissingDebugInfo;
const debug_str = opt_debug_str orelse
return error.MissingDebugInfo;
const debug_abbrev = opt_debug_abbrev orelse
return error.MissingDebugInfo;
var di = DW.DwarfInfo{
.endian = .Little,
.debug_info = try chopSlice(mapped_mem, debug_info.offset, debug_info.size),
.debug_abbrev = try chopSlice(mapped_mem, debug_abbrev.offset, debug_abbrev.size),
.debug_str = try chopSlice(mapped_mem, debug_str.offset, debug_str.size),
.debug_line = try chopSlice(mapped_mem, debug_line.offset, debug_line.size),
.debug_ranges = if (opt_debug_ranges) |debug_ranges|
try chopSlice(mapped_mem, debug_ranges.offset, debug_ranges.size)
else
null,
};
try DW.openDwarfDebugInfo(&di, self.allocator());
// Add the debug info to the cache
try self.ofiles.putNoClobber(o_file_path, di);
return di;
}
fn getSymbolAtAddress(self: *@This(), address: usize) !SymbolInfo {
nosuspend {
                // Translate the VA into an address inside this object
const relocated_address = address - self.base_address;
                assert(relocated_address >= 0x100000000); // 64-bit macOS images map above the 4 GiB __PAGEZERO
// Find the .o file where this symbol is defined
const symbol = machoSearchSymbols(self.symbols, relocated_address) orelse
return SymbolInfo{};
                // Take the symbol name from the N_FUN STAB entry; we fall back
                // to it if we fail to find the DWARF info
const stab_symbol = mem.spanZ(self.strings[symbol.nlist.n_strx..]);
if (symbol.ofile == null)
return SymbolInfo{ .symbol_name = stab_symbol };
const o_file_path = mem.spanZ(self.strings[symbol.ofile.?.n_strx..]);
                // Check if its debug info is already in the cache
var o_file_di = self.ofiles.get(o_file_path) orelse
(self.loadOFile(o_file_path) catch |err| switch (err) {
error.FileNotFound,
error.MissingDebugInfo,
error.InvalidDebugInfo,
=> {
return SymbolInfo{ .symbol_name = stab_symbol };
},
else => return err,
});
                // Translate the address again, this time into an address
                // inside the .o file
const relocated_address_o = relocated_address - symbol.reloc;
if (o_file_di.findCompileUnit(relocated_address_o)) |compile_unit| {
return SymbolInfo{
.symbol_name = o_file_di.getSymbolName(relocated_address_o) orelse "???",
.compile_unit_name = compile_unit.die.getAttrString(&o_file_di, DW.AT_name) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => "???",
else => return err,
},
.line_info = o_file_di.getLineNumberInfo(compile_unit.*, relocated_address_o) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => null,
else => return err,
},
};
} else |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => {
return SymbolInfo{ .symbol_name = stab_symbol };
},
else => return err,
}
unreachable;
}
}
},
.uefi, .windows => struct {
base_address: usize,
pdb: pdb.Pdb,
coff: *coff.Coff,
sect_contribs: []pdb.SectionContribEntry,
modules: []Module,
pub fn allocator(self: @This()) *mem.Allocator {
return self.coff.allocator;
}
fn getSymbolAtAddress(self: *@This(), address: usize) !SymbolInfo {
            // Translate the VA into an address inside this object
const relocated_address = address - self.base_address;
var coff_section: *coff.Section = undefined;
const mod_index = for (self.sect_contribs) |sect_contrib| {
if (sect_contrib.Section > self.coff.sections.items.len) continue;
// Remember that SectionContribEntry.Section is 1-based.
coff_section = &self.coff.sections.items[sect_contrib.Section - 1];
const vaddr_start = coff_section.header.virtual_address + sect_contrib.Offset;
const vaddr_end = vaddr_start + sect_contrib.Size;
if (relocated_address >= vaddr_start and relocated_address < vaddr_end) {
break sect_contrib.ModuleIndex;
}
} else {
// we have no information to add to the address
return SymbolInfo{};
};
const mod = &self.modules[mod_index];
try populateModule(self, mod);
const obj_basename = fs.path.basename(mod.obj_file_name);
var symbol_i: usize = 0;
const symbol_name = if (!mod.populated) "???" else while (symbol_i != mod.symbols.len) {
const prefix = @ptrCast(*pdb.RecordPrefix, &mod.symbols[symbol_i]);
if (prefix.RecordLen < 2)
return error.InvalidDebugInfo;
switch (prefix.RecordKind) {
.S_LPROC32, .S_GPROC32 => {
const proc_sym = @ptrCast(*pdb.ProcSym, &mod.symbols[symbol_i + @sizeOf(pdb.RecordPrefix)]);
const vaddr_start = coff_section.header.virtual_address + proc_sym.CodeOffset;
const vaddr_end = vaddr_start + proc_sym.CodeSize;
if (relocated_address >= vaddr_start and relocated_address < vaddr_end) {
break mem.spanZ(@ptrCast([*:0]u8, proc_sym) + @sizeOf(pdb.ProcSym));
}
},
else => {},
}
symbol_i += prefix.RecordLen + @sizeOf(u16);
if (symbol_i > mod.symbols.len)
return error.InvalidDebugInfo;
} else "???";
const subsect_info = mod.subsect_info;
var sect_offset: usize = 0;
var skip_len: usize = undefined;
const opt_line_info = subsections: {
const checksum_offset = mod.checksum_offset orelse break :subsections null;
while (sect_offset != subsect_info.len) : (sect_offset += skip_len) {
const subsect_hdr = @ptrCast(*pdb.DebugSubsectionHeader, &subsect_info[sect_offset]);
skip_len = subsect_hdr.Length;
sect_offset += @sizeOf(pdb.DebugSubsectionHeader);
switch (subsect_hdr.Kind) {
.Lines => {
var line_index = sect_offset;
const line_hdr = @ptrCast(*pdb.LineFragmentHeader, &subsect_info[line_index]);
if (line_hdr.RelocSegment == 0)
return error.MissingDebugInfo;
line_index += @sizeOf(pdb.LineFragmentHeader);
const frag_vaddr_start = coff_section.header.virtual_address + line_hdr.RelocOffset;
const frag_vaddr_end = frag_vaddr_start + line_hdr.CodeSize;
if (relocated_address >= frag_vaddr_start and relocated_address < frag_vaddr_end) {
// There is an unknown number of LineBlockFragmentHeaders (and their accompanying line and column records)
// from now on. We will iterate through them, and eventually find a LineInfo that we're interested in,
// breaking out to :subsections. If not, we will make sure to not read anything outside of this subsection.
const subsection_end_index = sect_offset + subsect_hdr.Length;
while (line_index < subsection_end_index) {
const block_hdr = @ptrCast(*pdb.LineBlockFragmentHeader, &subsect_info[line_index]);
line_index += @sizeOf(pdb.LineBlockFragmentHeader);
const start_line_index = line_index;
const has_column = line_hdr.Flags.LF_HaveColumns;
// All line entries are stored inside their line block by ascending start address.
// Heuristic: we want to find the last line entry
// that has a vaddr_start <= relocated_address.
// This is done with a simple linear search.
var line_i: u32 = 0;
while (line_i < block_hdr.NumLines) : (line_i += 1) {
const line_num_entry = @ptrCast(*pdb.LineNumberEntry, &subsect_info[line_index]);
line_index += @sizeOf(pdb.LineNumberEntry);
const vaddr_start = frag_vaddr_start + line_num_entry.Offset;
if (relocated_address < vaddr_start) {
break;
}
}
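                                    // Worked example (illustrative): with entries at offsets
                                    // { 0, 8, 32 } and a relocated address of 20, the scan breaks
                                    // at offset 32 with line_i == 2, so line_entry_idx == 1 below
                                    // selects the entry at offset 8, the last one starting at or
                                    // before the address.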
// line_i == 0 would mean that no matching LineNumberEntry was found.
if (line_i > 0) {
const subsect_index = checksum_offset + block_hdr.NameIndex;
const chksum_hdr = @ptrCast(*pdb.FileChecksumEntryHeader, &mod.subsect_info[subsect_index]);
const strtab_offset = @sizeOf(pdb.PDBStringTableHeader) + chksum_hdr.FileNameOffset;
try self.pdb.string_table.seekTo(strtab_offset);
const source_file_name = try self.pdb.string_table.readNullTermString(self.allocator());
const line_entry_idx = line_i - 1;
const column = if (has_column) blk: {
const start_col_index = start_line_index + @sizeOf(pdb.LineNumberEntry) * block_hdr.NumLines;
const col_index = start_col_index + @sizeOf(pdb.ColumnNumberEntry) * line_entry_idx;
const col_num_entry = @ptrCast(*pdb.ColumnNumberEntry, &subsect_info[col_index]);
break :blk col_num_entry.StartColumn;
} else 0;
const found_line_index = start_line_index + line_entry_idx * @sizeOf(pdb.LineNumberEntry);
const line_num_entry = @ptrCast(*pdb.LineNumberEntry, &subsect_info[found_line_index]);
const flags = @ptrCast(*pdb.LineNumberEntry.Flags, &line_num_entry.Flags);
break :subsections LineInfo{
.allocator = self.allocator(),
.file_name = source_file_name,
.line = flags.Start,
.column = column,
};
}
}
// Checking that we are not reading garbage after the (possibly) multiple block fragments.
if (line_index != subsection_end_index) {
return error.InvalidDebugInfo;
}
}
},
else => {},
}
if (sect_offset > subsect_info.len)
return error.InvalidDebugInfo;
} else {
break :subsections null;
}
};
return SymbolInfo{
.symbol_name = symbol_name,
.compile_unit_name = obj_basename,
.line_info = opt_line_info,
};
}
},
.linux, .netbsd, .freebsd, .dragonfly, .openbsd => struct {
base_address: usize,
dwarf: DW.DwarfInfo,
mapped_memory: []const u8,
fn getSymbolAtAddress(self: *@This(), address: usize) !SymbolInfo {
            // Translate the VA into an address inside this object
const relocated_address = address - self.base_address;
if (nosuspend self.dwarf.findCompileUnit(relocated_address)) |compile_unit| {
return SymbolInfo{
.symbol_name = nosuspend self.dwarf.getSymbolName(relocated_address) orelse "???",
.compile_unit_name = compile_unit.die.getAttrString(&self.dwarf, DW.AT_name) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => "???",
else => return err,
},
.line_info = nosuspend self.dwarf.getLineNumberInfo(compile_unit.*, relocated_address) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => null,
else => return err,
},
};
} else |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => {
return SymbolInfo{};
},
else => return err,
}
unreachable;
}
},
else => DW.DwarfInfo,
};
/// TODO multithreaded awareness
var debug_info_allocator: ?*mem.Allocator = null;
var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;
fn getDebugInfoAllocator() *mem.Allocator {
if (debug_info_allocator) |a| return a;
debug_info_arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
debug_info_allocator = &debug_info_arena_allocator.allocator;
return &debug_info_arena_allocator.allocator;
}
/// Whether or not the current target can print useful debug information when a segfault occurs.
pub const have_segfault_handling_support = switch (builtin.os.tag) {
.linux, .netbsd => true,
.windows => true,
else => false,
};
pub const enable_segfault_handler: bool = if (@hasDecl(root, "enable_segfault_handler"))
root.enable_segfault_handler
else
runtime_safety and have_segfault_handling_support;
pub fn maybeEnableSegfaultHandler() void {
if (enable_segfault_handler) {
std.debug.attachSegfaultHandler();
}
}
var windows_segfault_handle: ?windows.HANDLE = null;
/// Attaches a global SIGSEGV handler which calls @panic("segmentation fault");
pub fn attachSegfaultHandler() void {
if (!have_segfault_handling_support) {
@compileError("segfault handler not supported for this target");
}
if (builtin.os.tag == .windows) {
windows_segfault_handle = windows.kernel32.AddVectoredExceptionHandler(0, handleSegfaultWindows);
return;
}
var act = os.Sigaction{
.sigaction = handleSegfaultLinux,
.mask = os.empty_sigset,
.flags = (os.SA_SIGINFO | os.SA_RESTART | os.SA_RESETHAND),
};
os.sigaction(os.SIGSEGV, &act, null);
os.sigaction(os.SIGILL, &act, null);
os.sigaction(os.SIGBUS, &act, null);
}
fn resetSegfaultHandler() void {
if (builtin.os.tag == .windows) {
if (windows_segfault_handle) |handle| {
assert(windows.kernel32.RemoveVectoredExceptionHandler(handle) != 0);
windows_segfault_handle = null;
}
return;
}
var act = os.Sigaction{
.sigaction = os.SIG_DFL,
.mask = os.empty_sigset,
.flags = 0,
};
os.sigaction(os.SIGSEGV, &act, null);
os.sigaction(os.SIGILL, &act, null);
os.sigaction(os.SIGBUS, &act, null);
}
fn handleSegfaultLinux(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const c_void) callconv(.C) noreturn {
    // Reset to the default handler so that if a segfault happens in this handler it will crash
    // the process. Also, when this handler returns, the original instruction will be repeated
    // and the resulting segfault will crash the process rather than continually dumping stack traces.
resetSegfaultHandler();
const addr = switch (builtin.os.tag) {
.linux => @ptrToInt(info.fields.sigfault.addr),
.netbsd => @ptrToInt(info.info.reason.fault.addr),
else => unreachable,
};
// Don't use std.debug.print() as stderr_mutex may still be locked.
const stderr = io.getStdErr().writer();
_ = switch (sig) {
os.SIGSEGV => stderr.print("Segmentation fault at address 0x{x}\n", .{addr}),
os.SIGILL => stderr.print("Illegal instruction at address 0x{x}\n", .{addr}),
os.SIGBUS => stderr.print("Bus error at address 0x{x}\n", .{addr}),
else => unreachable,
} catch os.abort();
switch (builtin.arch) {
.i386 => {
const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
const ip = @intCast(usize, ctx.mcontext.gregs[os.REG_EIP]);
const bp = @intCast(usize, ctx.mcontext.gregs[os.REG_EBP]);
dumpStackTraceFromBase(bp, ip);
},
.x86_64 => {
const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
const ip = @intCast(usize, ctx.mcontext.gregs[os.REG_RIP]);
const bp = @intCast(usize, ctx.mcontext.gregs[os.REG_RBP]);
dumpStackTraceFromBase(bp, ip);
},
.arm => {
const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
const ip = @intCast(usize, ctx.mcontext.arm_pc);
const bp = @intCast(usize, ctx.mcontext.arm_fp);
dumpStackTraceFromBase(bp, ip);
},
.aarch64 => {
const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
const ip = @intCast(usize, ctx.mcontext.pc);
// x29 is the ABI-designated frame pointer
const bp = @intCast(usize, ctx.mcontext.regs[29]);
dumpStackTraceFromBase(bp, ip);
},
else => {},
}
// We cannot allow the signal handler to return because when it runs the original instruction
// again, the memory may be mapped and undefined behavior would occur rather than repeating
// the segfault. So we simply abort here.
os.abort();
}
fn handleSegfaultWindows(info: *windows.EXCEPTION_POINTERS) callconv(windows.WINAPI) c_long {
switch (info.ExceptionRecord.ExceptionCode) {
windows.EXCEPTION_DATATYPE_MISALIGNMENT => handleSegfaultWindowsExtra(info, 0, "Unaligned Memory Access"),
windows.EXCEPTION_ACCESS_VIOLATION => handleSegfaultWindowsExtra(info, 1, null),
windows.EXCEPTION_ILLEGAL_INSTRUCTION => handleSegfaultWindowsExtra(info, 2, null),
windows.EXCEPTION_STACK_OVERFLOW => handleSegfaultWindowsExtra(info, 0, "Stack Overflow"),
else => return windows.EXCEPTION_CONTINUE_SEARCH,
}
}
// zig won't let me use an anon enum here https://github.com/ziglang/zig/issues/3707
fn handleSegfaultWindowsExtra(info: *windows.EXCEPTION_POINTERS, comptime msg: u8, comptime format: ?[]const u8) noreturn {
const exception_address = @ptrToInt(info.ExceptionRecord.ExceptionAddress);
if (@hasDecl(windows, "CONTEXT")) {
const regs = info.ContextRecord.getRegs();
// Don't use std.debug.print() as stderr_mutex may still be locked.
const stderr = io.getStdErr().writer();
_ = switch (msg) {
0 => stderr.print("{s}\n", .{format.?}),
1 => stderr.print("Segmentation fault at address 0x{x}\n", .{info.ExceptionRecord.ExceptionInformation[1]}),
2 => stderr.print("Illegal instruction at address 0x{x}\n", .{regs.ip}),
else => unreachable,
} catch os.abort();
dumpStackTraceFromBase(regs.bp, regs.ip);
os.abort();
} else {
switch (msg) {
0 => panicExtra(null, exception_address, format.?, .{}),
1 => panicExtra(null, exception_address, "Segmentation fault at address 0x{x}", .{info.ExceptionRecord.ExceptionInformation[1]}),
2 => panicExtra(null, exception_address, "Illegal Instruction", .{}),
else => unreachable,
}
}
}
pub fn dumpStackPointerAddr(prefix: []const u8) void {
const sp = asm (""
: [argc] "={rsp}" (-> usize)
);
std.debug.warn("{} sp = 0x{x}\n", .{ prefix, sp });
} | lib/std/debug.zig |
const std = @import("std");
const builtin = @import("builtin");
const expect = std.testing.expect;
test "while loop" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var i: i32 = 0;
while (i < 4) {
i += 1;
}
try expect(i == 4);
try expect(whileLoop1() == 1);
}
fn whileLoop1() i32 {
return whileLoop2();
}
fn whileLoop2() i32 {
while (true) {
return 1;
}
}
test "static eval while" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(static_eval_while_number == 1);
}
const static_eval_while_number = staticWhileLoop1();
fn staticWhileLoop1() i32 {
return staticWhileLoop2();
}
fn staticWhileLoop2() i32 {
while (true) {
return 1;
}
}
test "while with continue expression" {
var sum: i32 = 0;
{
var i: i32 = 0;
while (i < 10) : (i += 1) {
if (i == 5) continue;
sum += i;
}
}
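    // 0 + 1 + ... + 9 == 45; skipping 5 leaves 40.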
try expect(sum == 40);
}
test "while with else" {
var sum: i32 = 0;
var i: i32 = 0;
var got_else: i32 = 0;
while (i < 10) : (i += 1) {
sum += 1;
} else {
got_else += 1;
}
try expect(sum == 10);
try expect(got_else == 1);
}
var numbers_left: i32 = undefined;
fn getNumberOrErr() anyerror!i32 {
return if (numbers_left == 0) error.OutOfNumbers else x: {
numbers_left -= 1;
break :x numbers_left;
};
}
fn getNumberOrNull() ?i32 {
return if (numbers_left == 0) null else x: {
numbers_left -= 1;
break :x numbers_left;
};
}
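// Each call to the two helpers above counts numbers_left down, yielding
// 9, 8, ..., 0 before null/error, which is why the sums in the tests below
// come out to 45.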
test "continue outer while loop" {
testContinueOuter();
comptime testContinueOuter();
}
fn testContinueOuter() void {
var i: usize = 0;
outer: while (i < 10) : (i += 1) {
while (true) {
continue :outer;
}
}
}
test "break from outer while loop" {
testBreakOuter();
comptime testBreakOuter();
}
fn testBreakOuter() void {
outer: while (true) {
while (true) {
break :outer;
}
}
}
test "while copies its payload" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
var tmp: ?i32 = 10;
while (tmp) |value| {
// Modify the original variable
tmp = null;
try expect(value == 10);
}
}
};
try S.doTheTest();
comptime try S.doTheTest();
}
test "continue and break" {
if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest;
try runContinueAndBreakTest();
try expect(continue_and_break_counter == 8);
}
var continue_and_break_counter: i32 = 0;
fn runContinueAndBreakTest() !void {
var i: i32 = 0;
while (true) {
continue_and_break_counter += 2;
i += 1;
if (i < 4) {
continue;
}
break;
}
try expect(i == 4);
}
test "while with optional as condition" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
numbers_left = 10;
var sum: i32 = 0;
while (getNumberOrNull()) |value| {
sum += value;
}
try expect(sum == 45);
}
test "while with optional as condition with else" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
numbers_left = 10;
var sum: i32 = 0;
var got_else: i32 = 0;
while (getNumberOrNull()) |value| {
sum += value;
try expect(got_else == 0);
} else {
got_else += 1;
}
try expect(sum == 45);
try expect(got_else == 1);
}
test "while with error union condition" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
numbers_left = 10;
var sum: i32 = 0;
var got_else: i32 = 0;
while (getNumberOrErr()) |value| {
sum += value;
} else |err| {
try expect(err == error.OutOfNumbers);
got_else += 1;
}
try expect(sum == 45);
try expect(got_else == 1);
}
test "while on bool with else result follow else prong" {
const result = while (returnFalse()) {
break @as(i32, 10);
} else @as(i32, 2);
try expect(result == 2);
}
test "while on bool with else result follow break prong" {
const result = while (returnTrue()) {
break @as(i32, 10);
} else @as(i32, 2);
try expect(result == 10);
}
test "while on optional with else result follow else prong" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const result = while (returnNull()) |value| {
break value;
} else @as(i32, 2);
try expect(result == 2);
}
test "while on optional with else result follow break prong" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const result = while (returnOptional(10)) |value| {
break value;
} else @as(i32, 2);
try expect(result == 10);
}
fn returnNull() ?i32 {
return null;
}
fn returnOptional(x: i32) ?i32 {
return x;
}
fn returnError() anyerror!i32 {
return error.YouWantedAnError;
}
fn returnSuccess(x: i32) anyerror!i32 {
return x;
}
fn returnFalse() bool {
return false;
}
fn returnTrue() bool {
return true;
}
test "return with implicit cast from while loop" {
returnWithImplicitCastFromWhileLoopTest() catch unreachable;
}
fn returnWithImplicitCastFromWhileLoopTest() anyerror!void {
while (true) {
return;
}
}
test "while on error union with else result follow else prong" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const result = while (returnError()) |value| {
break value;
} else |_| @as(i32, 2);
try expect(result == 2);
}
test "while on error union with else result follow break prong" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const result = while (returnSuccess(10)) |value| {
break value;
} else |_| @as(i32, 2);
try expect(result == 10);
}
test "while bool 2 break statements and an else" {
const S = struct {
fn entry(t: bool, f: bool) !void {
var ok = false;
ok = while (t) {
if (f) break false;
if (t) break true;
} else false;
try expect(ok);
}
};
try S.entry(true, false);
comptime try S.entry(true, false);
}
test "while optional 2 break statements and an else" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const S = struct {
fn entry(opt_t: ?bool, f: bool) !void {
var ok = false;
ok = while (opt_t) |t| {
if (f) break false;
if (t) break true;
} else false;
try expect(ok);
}
};
try S.entry(true, false);
comptime try S.entry(true, false);
}
test "while error 2 break statements and an else" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const S = struct {
fn entry(opt_t: anyerror!bool, f: bool) !void {
var ok = false;
ok = while (opt_t) |t| {
if (f) break false;
if (t) break true;
} else |_| false;
try expect(ok);
}
};
try S.entry(true, false);
comptime try S.entry(true, false);
} | test/behavior/while.zig |
const builtin = @import("builtin");
const std = @import("std");
const Builder = std.build.Builder;
const tests = @import("test/tests.zig");
const os = std.os;
const BufMap = std.BufMap;
const warn = std.debug.warn;
const mem = std.mem;
const ArrayList = std.ArrayList;
const Buffer = std.Buffer;
const io = std.io;
pub fn build(b: *Builder) !void {
const mode = b.standardReleaseOptions();
var docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
const rel_zig_exe = try os.path.relative(b.allocator, b.build_root, b.zig_exe);
const langref_out_path = os.path.join(b.allocator, b.cache_root, "langref.html") catch unreachable;
var docgen_cmd = b.addCommand(null, b.env_map, [][]const u8{
docgen_exe.getOutputPath(),
rel_zig_exe,
"doc" ++ os.path.sep_str ++ "langref.html.in",
langref_out_path,
});
docgen_cmd.step.dependOn(&docgen_exe.step);
const docs_step = b.step("docs", "Build documentation");
docs_step.dependOn(&docgen_cmd.step);
const test_step = b.step("test", "Run all the tests");
    // Find the stage0 build artifacts because we're going to reuse config.h and the zig_cpp library.
const build_info = try b.exec([][]const u8{
b.zig_exe,
"BUILD_INFO",
});
var index: usize = 0;
var ctx = Context{
.cmake_binary_dir = nextValue(&index, build_info),
.cxx_compiler = nextValue(&index, build_info),
.llvm_config_exe = nextValue(&index, build_info),
.lld_include_dir = nextValue(&index, build_info),
.lld_libraries = nextValue(&index, build_info),
.std_files = nextValue(&index, build_info),
.c_header_files = nextValue(&index, build_info),
.dia_guids_lib = nextValue(&index, build_info),
.llvm = undefined,
.no_rosegment = b.option(bool, "no-rosegment", "Workaround to enable valgrind builds") orelse false,
};
ctx.llvm = try findLLVM(b, ctx.llvm_config_exe);
var test_stage2 = b.addTest("src-self-hosted/test.zig");
test_stage2.setBuildMode(builtin.Mode.Debug);
var exe = b.addExecutable("zig", "src-self-hosted/main.zig");
exe.setBuildMode(mode);
try configureStage2(b, test_stage2, ctx);
try configureStage2(b, exe, ctx);
b.default_step.dependOn(&exe.step);
const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false;
const skip_release_small = b.option(bool, "skip-release-small", "Main test suite skips release-small builds") orelse skip_release;
const skip_release_fast = b.option(bool, "skip-release-fast", "Main test suite skips release-fast builds") orelse skip_release;
const skip_release_safe = b.option(bool, "skip-release-safe", "Main test suite skips release-safe builds") orelse skip_release;
const skip_self_hosted = b.option(bool, "skip-self-hosted", "Main test suite skips building self hosted compiler") orelse false;
if (!skip_self_hosted) {
test_step.dependOn(&exe.step);
}
const verbose_link_exe = b.option(bool, "verbose-link", "Print link command for self hosted compiler") orelse false;
exe.setVerboseLink(verbose_link_exe);
b.installArtifact(exe);
installStdLib(b, ctx.std_files);
installCHeaders(b, ctx.c_header_files);
const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");
const test_stage2_step = b.step("test-stage2", "Run the stage2 compiler tests");
test_stage2_step.dependOn(&test_stage2.step);
// TODO see https://github.com/ziglang/zig/issues/1364
if (false) {
test_step.dependOn(test_stage2_step);
}
var chosen_modes: [4]builtin.Mode = undefined;
var chosen_mode_index: usize = 0;
chosen_modes[chosen_mode_index] = builtin.Mode.Debug;
chosen_mode_index += 1;
if (!skip_release_safe) {
chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseSafe;
chosen_mode_index += 1;
}
if (!skip_release_fast) {
chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseFast;
chosen_mode_index += 1;
}
if (!skip_release_small) {
chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseSmall;
chosen_mode_index += 1;
}
const modes = chosen_modes[0..chosen_mode_index];
test_step.dependOn(tests.addPkgTests(b, test_filter, "test/behavior.zig", "behavior", "Run the behavior tests", modes));
test_step.dependOn(tests.addPkgTests(b, test_filter, "std/index.zig", "std", "Run the standard library tests", modes));
test_step.dependOn(tests.addPkgTests(b, test_filter, "std/special/compiler_rt/index.zig", "compiler-rt", "Run the compiler_rt tests", modes));
test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
test_step.dependOn(tests.addBuildExampleTests(b, test_filter, modes));
test_step.dependOn(tests.addCliTests(b, test_filter, modes));
test_step.dependOn(tests.addCompileErrorTests(b, test_filter, modes));
test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
test_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter, modes));
test_step.dependOn(tests.addTranslateCTests(b, test_filter));
test_step.dependOn(tests.addGenHTests(b, test_filter));
test_step.dependOn(docs_step);
}
fn dependOnLib(b: *Builder, lib_exe_obj: var, dep: LibraryDep) void {
for (dep.libdirs.toSliceConst()) |lib_dir| {
lib_exe_obj.addLibPath(lib_dir);
}
const lib_dir = os.path.join(b.allocator, dep.prefix, "lib") catch unreachable;
for (dep.system_libs.toSliceConst()) |lib| {
const static_bare_name = if (mem.eql(u8, lib, "curses"))
([]const u8)("libncurses.a")
else
b.fmt("lib{}.a", lib);
const static_lib_name = os.path.join(b.allocator, lib_dir, static_bare_name) catch unreachable;
const have_static = fileExists(static_lib_name) catch unreachable;
if (have_static) {
lib_exe_obj.addObjectFile(static_lib_name);
} else {
lib_exe_obj.linkSystemLibrary(lib);
}
}
for (dep.libs.toSliceConst()) |lib| {
lib_exe_obj.addObjectFile(lib);
}
for (dep.includes.toSliceConst()) |include_path| {
lib_exe_obj.addIncludeDir(include_path);
}
}
fn fileExists(filename: []const u8) !bool {
os.File.access(filename) catch |err| switch (err) {
error.PermissionDenied,
error.FileNotFound,
=> return false,
else => return err,
};
return true;
}
fn addCppLib(b: *Builder, lib_exe_obj: var, cmake_binary_dir: []const u8, lib_name: []const u8) void {
const lib_prefix = if (lib_exe_obj.target.isWindows()) "" else "lib";
lib_exe_obj.addObjectFile(os.path.join(b.allocator, cmake_binary_dir, "zig_cpp", b.fmt("{}{}{}", lib_prefix, lib_name, lib_exe_obj.target.libFileExt())) catch unreachable);
}
const LibraryDep = struct {
prefix: []const u8,
libdirs: ArrayList([]const u8),
libs: ArrayList([]const u8),
system_libs: ArrayList([]const u8),
includes: ArrayList([]const u8),
};
fn findLLVM(b: *Builder, llvm_config_exe: []const u8) !LibraryDep {
const shared_mode = try b.exec([][]const u8{ llvm_config_exe, "--shared-mode" });
const is_static = mem.startsWith(u8, shared_mode, "static");
const libs_output = if (is_static)
try b.exec([][]const u8{
llvm_config_exe,
"--libfiles",
"--system-libs",
})
else
try b.exec([][]const u8{
llvm_config_exe,
"--libs",
});
const includes_output = try b.exec([][]const u8{ llvm_config_exe, "--includedir" });
const libdir_output = try b.exec([][]const u8{ llvm_config_exe, "--libdir" });
const prefix_output = try b.exec([][]const u8{ llvm_config_exe, "--prefix" });
var result = LibraryDep{
.prefix = mem.split(prefix_output, " \r\n").next().?,
.libs = ArrayList([]const u8).init(b.allocator),
.system_libs = ArrayList([]const u8).init(b.allocator),
.includes = ArrayList([]const u8).init(b.allocator),
.libdirs = ArrayList([]const u8).init(b.allocator),
};
{
var it = mem.split(libs_output, " \r\n");
while (it.next()) |lib_arg| {
if (mem.startsWith(u8, lib_arg, "-l")) {
try result.system_libs.append(lib_arg[2..]);
} else {
if (os.path.isAbsolute(lib_arg)) {
try result.libs.append(lib_arg);
} else {
try result.system_libs.append(lib_arg);
}
}
}
}
{
var it = mem.split(includes_output, " \r\n");
while (it.next()) |include_arg| {
if (mem.startsWith(u8, include_arg, "-I")) {
try result.includes.append(include_arg[2..]);
} else {
try result.includes.append(include_arg);
}
}
}
{
var it = mem.split(libdir_output, " \r\n");
while (it.next()) |libdir| {
if (mem.startsWith(u8, libdir, "-L")) {
try result.libdirs.append(libdir[2..]);
} else {
try result.libdirs.append(libdir);
}
}
}
return result;
}
pub fn installStdLib(b: *Builder, stdlib_files: []const u8) void {
var it = mem.split(stdlib_files, ";");
while (it.next()) |stdlib_file| {
const src_path = os.path.join(b.allocator, "std", stdlib_file) catch unreachable;
const dest_path = os.path.join(b.allocator, "lib", "zig", "std", stdlib_file) catch unreachable;
b.installFile(src_path, dest_path);
}
}
pub fn installCHeaders(b: *Builder, c_header_files: []const u8) void {
var it = mem.split(c_header_files, ";");
while (it.next()) |c_header_file| {
const src_path = os.path.join(b.allocator, "c_headers", c_header_file) catch unreachable;
const dest_path = os.path.join(b.allocator, "lib", "zig", "include", c_header_file) catch unreachable;
b.installFile(src_path, dest_path);
}
}
fn nextValue(index: *usize, build_info: []const u8) []const u8 {
const start = index.*;
while (true) : (index.* += 1) {
switch (build_info[index.*]) {
'\n' => {
const result = build_info[start..index.*];
index.* += 1;
return result;
},
'\r' => {
const result = build_info[start..index.*];
index.* += 2;
return result;
},
else => continue,
}
}
}
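// Illustrative example (not from the original source): for build_info
// "DIR\nEXE\r\nREST", successive calls return "DIR" then "EXE", with index
// advanced past the '\n' or "\r\n" terminator each time.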
fn configureStage2(b: *Builder, exe: var, ctx: Context) !void {
exe.setNoRoSegment(ctx.no_rosegment);
exe.addIncludeDir("src");
exe.addIncludeDir(ctx.cmake_binary_dir);
addCppLib(b, exe, ctx.cmake_binary_dir, "zig_cpp");
if (ctx.lld_include_dir.len != 0) {
exe.addIncludeDir(ctx.lld_include_dir);
var it = mem.split(ctx.lld_libraries, ";");
while (it.next()) |lib| {
exe.addObjectFile(lib);
}
} else {
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_wasm");
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_elf");
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_coff");
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_lib");
}
dependOnLib(b, exe, ctx.llvm);
if (exe.target.getOs() == builtin.Os.linux) {
try addCxxKnownPath(b, ctx, exe, "libstdc++.a",
\\Unable to determine path to libstdc++.a
\\On Fedora, install libstdc++-static and try again.
\\
);
exe.linkSystemLibrary("pthread");
} else if (exe.target.isDarwin() or exe.target.isFreeBSD()) {
if (addCxxKnownPath(b, ctx, exe, "libgcc_eh.a", "")) {
// Compiler is GCC.
try addCxxKnownPath(b, ctx, exe, "libstdc++.a", null);
exe.linkSystemLibrary("pthread");
// TODO LLD cannot perform this link.
// See https://github.com/ziglang/zig/issues/1535
exe.enableSystemLinkerHack();
} else |err| switch (err) {
error.RequiredLibraryNotFound => {
// System compiler, not gcc.
exe.linkSystemLibrary("c++");
},
else => return err,
}
}
if (ctx.dia_guids_lib.len != 0) {
exe.addObjectFile(ctx.dia_guids_lib);
}
exe.linkSystemLibrary("c");
}
fn addCxxKnownPath(
b: *Builder,
ctx: Context,
exe: var,
objname: []const u8,
errtxt: ?[]const u8,
) !void {
const path_padded = try b.exec([][]const u8{
ctx.cxx_compiler,
b.fmt("-print-file-name={}", objname),
});
const path_unpadded = mem.split(path_padded, "\r\n").next().?;
if (mem.eql(u8, path_unpadded, objname)) {
if (errtxt) |msg| {
warn("{}", msg);
} else {
warn("Unable to determine path to {}\n", objname);
}
return error.RequiredLibraryNotFound;
}
exe.addObjectFile(path_unpadded);
}
const Context = struct {
cmake_binary_dir: []const u8,
cxx_compiler: []const u8,
llvm_config_exe: []const u8,
lld_include_dir: []const u8,
lld_libraries: []const u8,
std_files: []const u8,
c_header_files: []const u8,
dia_guids_lib: []const u8,
llvm: LibraryDep,
no_rosegment: bool,
}; | build.zig |
const std = @import("std.zig");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem; // For mem.Compare
const Color = enum(u1) {
Black,
Red,
};
const Red = Color.Red;
const Black = Color.Black;
const ReplaceError = error{NotEqual};
/// Insert this into the struct that you want to add to a red-black tree.
/// Do not use a pointer. Turn the *rb.Node results of the functions in rb
/// (after resolving optionals) back into your structure using @fieldParentPtr(). Example:
///
/// const Number = struct {
///     node: rb.Node,
///     value: i32,
/// };
/// fn number(node: *rb.Node) *Number {
///     return @fieldParentPtr(Number, "node", node);
/// }
pub const Node = struct {
left: ?*Node,
right: ?*Node,
/// parent | color
parent_and_color: usize,
pub fn next(constnode: *Node) ?*Node {
var node = constnode;
if (node.right) |right| {
var n = right;
while (n.left) |left|
n = left;
return n;
}
while (true) {
var parent = node.getParent();
if (parent) |p| {
if (node != p.right)
return p;
node = p;
} else
return null;
}
}
pub fn prev(constnode: *Node) ?*Node {
var node = constnode;
if (node.left) |left| {
var n = left;
while (n.right) |right|
n = right;
return n;
}
while (true) {
var parent = node.getParent();
if (parent) |p| {
if (node != p.left)
return p;
node = p;
} else
return null;
}
}
pub fn isRoot(node: *Node) bool {
return node.getParent() == null;
}
fn isRed(node: *Node) bool {
return node.getColor() == Red;
}
fn isBlack(node: *Node) bool {
return node.getColor() == Black;
}
fn setParent(node: *Node, parent: ?*Node) void {
node.parent_and_color = @ptrToInt(parent) | (node.parent_and_color & 1);
}
fn getParent(node: *Node) ?*Node {
const mask: usize = 1;
comptime {
assert(@alignOf(*Node) >= 2);
}
const maybe_ptr = node.parent_and_color & ~mask;
return if (maybe_ptr == 0) null else @intToPtr(*Node, maybe_ptr);
}
fn setColor(node: *Node, color: Color) void {
const mask: usize = 1;
node.parent_and_color = (node.parent_and_color & ~mask) | @enumToInt(color);
}
fn getColor(node: *Node) Color {
return @intToEnum(Color, @intCast(u1, node.parent_and_color & 1));
}
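    // Illustrative example (not from the original source): *Node is at least
    // 2-byte aligned, so bit 0 of parent_and_color is free. A parent at
    // address 0x1000 colored Red (1) is stored as 0x1001; masking with ~1
    // recovers the pointer and bit 0 yields the color.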
fn setChild(node: *Node, child: ?*Node, is_left: bool) void {
if (is_left) {
node.left = child;
} else {
node.right = child;
}
}
fn getFirst(nodeconst: *Node) *Node {
var node = nodeconst;
while (node.left) |left| {
node = left;
}
return node;
}
    fn getLast(nodeconst: *Node) *Node {
        var node = nodeconst;
        while (node.right) |right| {
            node = right;
        }
        return node;
    }
};
pub const Tree = struct {
root: ?*Node,
compareFn: fn (*Node, *Node) mem.Compare,
/// If you have a need for a version that caches this, please file a bug.
pub fn first(tree: *Tree) ?*Node {
var node: *Node = tree.root orelse return null;
while (node.left) |left| {
node = left;
}
return node;
}
pub fn last(tree: *Tree) ?*Node {
var node: *Node = tree.root orelse return null;
while (node.right) |right| {
node = right;
}
return node;
}
/// Duplicate keys are not allowed. The item with the same key already in the
/// tree will be returned, and the item will not be inserted.
pub fn insert(tree: *Tree, node_const: *Node) ?*Node {
var node = node_const;
var maybe_key: ?*Node = undefined;
var maybe_parent: ?*Node = undefined;
var is_left: bool = undefined;
maybe_key = doLookup(node, tree, &maybe_parent, &is_left);
if (maybe_key) |key| {
return key;
}
node.left = null;
node.right = null;
node.setColor(Red);
node.setParent(maybe_parent);
if (maybe_parent) |parent| {
parent.setChild(node, is_left);
} else {
tree.root = node;
}
while (node.getParent()) |*parent| {
if (parent.*.isBlack())
break;
// the root is always black
var grandpa = parent.*.getParent() orelse unreachable;
if (parent.* == grandpa.left) {
var maybe_uncle = grandpa.right;
if (maybe_uncle) |uncle| {
if (uncle.isBlack())
break;
parent.*.setColor(Black);
uncle.setColor(Black);
grandpa.setColor(Red);
node = grandpa;
} else {
if (node == parent.*.right) {
rotateLeft(parent.*, tree);
node = parent.*;
parent.* = node.getParent().?; // Just rotated
}
parent.*.setColor(Black);
grandpa.setColor(Red);
rotateRight(grandpa, tree);
}
} else {
var maybe_uncle = grandpa.left;
if (maybe_uncle) |uncle| {
if (uncle.isBlack())
break;
parent.*.setColor(Black);
uncle.setColor(Black);
grandpa.setColor(Red);
node = grandpa;
} else {
if (node == parent.*.left) {
rotateRight(parent.*, tree);
node = parent.*;
parent.* = node.getParent().?; // Just rotated
}
parent.*.setColor(Black);
grandpa.setColor(Red);
rotateLeft(grandpa, tree);
}
}
}
        // This was an insert; there is at least one node.
tree.root.?.setColor(Black);
return null;
}
    /// lookup searches for the value of key using binary search. It will
    /// return a pointer to the node if it is there, otherwise it will return null.
    /// Complexity is guaranteed to be O(log n), where n is the number of nodes
    /// stored in the tree.
pub fn lookup(tree: *Tree, key: *Node) ?*Node {
var parent: ?*Node = undefined;
var is_left: bool = undefined;
return doLookup(key, tree, &parent, &is_left);
}
pub fn remove(tree: *Tree, nodeconst: *Node) void {
var node = nodeconst;
// as this has the same value as node, it is unsafe to access node after newnode
var newnode: ?*Node = nodeconst;
var maybe_parent: ?*Node = node.getParent();
var color: Color = undefined;
var next: *Node = undefined;
// This clause is to avoid optionals
if (node.left == null and node.right == null) {
if (maybe_parent) |parent| {
parent.setChild(null, parent.left == node);
} else
tree.root = null;
color = node.getColor();
newnode = null;
} else {
if (node.left == null) {
next = node.right.?; // Not both null as per above
} else if (node.right == null) {
next = node.left.?; // Not both null as per above
} else
next = node.right.?.getFirst(); // Just checked for null above
if (maybe_parent) |parent| {
parent.setChild(next, parent.left == node);
} else
tree.root = next;
if (node.left != null and node.right != null) {
const left = node.left.?;
const right = node.right.?;
color = next.getColor();
next.setColor(node.getColor());
next.left = left;
left.setParent(next);
if (next != right) {
var parent = next.getParent().?; // Was traversed via child node (right/left)
next.setParent(node.getParent());
newnode = next.right;
                    parent.left = newnode;
next.right = right;
right.setParent(next);
} else {
next.setParent(maybe_parent);
maybe_parent = next;
newnode = next.right;
}
} else {
color = node.getColor();
newnode = next;
}
}
if (newnode) |n|
n.setParent(maybe_parent);
if (color == Red)
return;
if (newnode) |n| {
n.setColor(Black);
return;
}
        while (node != tree.root) {
// If not root, there must be parent
var parent = maybe_parent.?;
if (node == parent.left) {
var sibling = parent.right.?; // Same number of black nodes.
if (sibling.isRed()) {
sibling.setColor(Black);
parent.setColor(Red);
rotateLeft(parent, tree);
sibling = parent.right.?; // Just rotated
}
if ((if (sibling.left) |n| n.isBlack() else true) and
(if (sibling.right) |n| n.isBlack() else true))
{
sibling.setColor(Red);
node = parent;
maybe_parent = parent.getParent();
continue;
}
if (if (sibling.right) |n| n.isBlack() else true) {
sibling.left.?.setColor(Black); // Same number of black nodes.
sibling.setColor(Red);
rotateRight(sibling, tree);
sibling = parent.right.?; // Just rotated
}
sibling.setColor(parent.getColor());
parent.setColor(Black);
sibling.right.?.setColor(Black); // Same number of black nodes.
rotateLeft(parent, tree);
newnode = tree.root;
break;
} else {
var sibling = parent.left.?; // Same number of black nodes.
if (sibling.isRed()) {
sibling.setColor(Black);
parent.setColor(Red);
rotateRight(parent, tree);
sibling = parent.left.?; // Just rotated
}
if ((if (sibling.left) |n| n.isBlack() else true) and
(if (sibling.right) |n| n.isBlack() else true))
{
sibling.setColor(Red);
node = parent;
maybe_parent = parent.getParent();
continue;
}
if (if (sibling.left) |n| n.isBlack() else true) {
sibling.right.?.setColor(Black); // Same number of black nodes
sibling.setColor(Red);
rotateLeft(sibling, tree);
sibling = parent.left.?; // Just rotated
}
sibling.setColor(parent.getColor());
parent.setColor(Black);
sibling.left.?.setColor(Black); // Same number of black nodes
rotateRight(parent, tree);
newnode = tree.root;
break;
}
if (node.isRed())
break;
}
if (newnode) |n|
n.setColor(Black);
}
/// This is a shortcut to avoid removing and re-inserting an item with the same key.
pub fn replace(tree: *Tree, old: *Node, newconst: *Node) !void {
var new = newconst;
// I assume this can get optimized out if the caller already knows.
if (tree.compareFn(old, new) != mem.Compare.Equal) return ReplaceError.NotEqual;
if (old.getParent()) |parent| {
parent.setChild(new, parent.left == old);
} else
tree.root = new;
if (old.left) |left|
left.setParent(new);
if (old.right) |right|
right.setParent(new);
new.* = old.*;
}
pub fn init(tree: *Tree, f: fn (*Node, *Node) mem.Compare) void {
tree.root = null;
tree.compareFn = f;
}
};
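/// Rotate the subtree rooted at `node` to the left: node.right takes its place
/// and node becomes its left child. Only links (and tree.root, if needed) are
/// updated; callers are responsible for recoloring.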
fn rotateLeft(node: *Node, tree: *Tree) void {
var p: *Node = node;
var q: *Node = node.right orelse unreachable;
var parent: *Node = undefined;
if (!p.isRoot()) {
parent = p.getParent().?;
if (parent.left == p) {
parent.left = q;
} else {
parent.right = q;
}
q.setParent(parent);
} else {
tree.root = q;
q.setParent(null);
}
p.setParent(q);
p.right = q.left;
if (p.right) |right| {
right.setParent(p);
}
q.left = p;
}
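/// Mirror of rotateLeft: node.left takes node's place and node becomes its
/// right child. Links only; colors are left to the caller.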
fn rotateRight(node: *Node, tree: *Tree) void {
var p: *Node = node;
var q: *Node = node.left orelse unreachable;
var parent: *Node = undefined;
if (!p.isRoot()) {
parent = p.getParent().?;
if (parent.left == p) {
parent.left = q;
} else {
parent.right = q;
}
q.setParent(parent);
} else {
tree.root = q;
q.setParent(null);
}
p.setParent(q);
p.left = q.right;
if (p.left) |left| {
left.setParent(p);
}
q.right = p;
}
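/// Walk from the root comparing against `key`. Returns the matching node, or
/// null with `pparent` and `is_left` describing the attachment point a new
/// node with this key would use.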
fn doLookup(key: *Node, tree: *Tree, pparent: *?*Node, is_left: *bool) ?*Node {
var maybe_node: ?*Node = tree.root;
pparent.* = null;
is_left.* = false;
while (maybe_node) |node| {
var res: mem.Compare = tree.compareFn(node, key);
if (res == mem.Compare.Equal) {
return node;
}
pparent.* = node;
if (res == mem.Compare.GreaterThan) {
is_left.* = true;
maybe_node = node.left;
} else if (res == mem.Compare.LessThan) {
is_left.* = false;
maybe_node = node.right;
} else {
unreachable;
}
}
return null;
}
const testNumber = struct {
node: Node,
value: usize,
};
fn testGetNumber(node: *Node) *testNumber {
return @fieldParentPtr(testNumber, "node", node);
}
fn testCompare(l: *Node, r: *Node) mem.Compare {
var left = testGetNumber(l);
var right = testGetNumber(r);
if (left.value < right.value) {
return mem.Compare.LessThan;
} else if (left.value == right.value) {
return mem.Compare.Equal;
} else if (left.value > right.value) {
return mem.Compare.GreaterThan;
}
unreachable;
}
test "rb" {
var tree: Tree = undefined;
var ns: [10]testNumber = undefined;
ns[0].value = 42;
ns[1].value = 41;
ns[2].value = 40;
ns[3].value = 39;
ns[4].value = 38;
ns[5].value = 39;
ns[6].value = 3453;
ns[7].value = 32345;
ns[8].value = 392345;
ns[9].value = 4;
var dup: testNumber = undefined;
dup.value = 32345;
tree.init(testCompare);
_ = tree.insert(&ns[1].node);
_ = tree.insert(&ns[2].node);
_ = tree.insert(&ns[3].node);
_ = tree.insert(&ns[4].node);
_ = tree.insert(&ns[5].node);
_ = tree.insert(&ns[6].node);
_ = tree.insert(&ns[7].node);
_ = tree.insert(&ns[8].node);
_ = tree.insert(&ns[9].node);
tree.remove(&ns[3].node);
testing.expect(tree.insert(&dup.node) == &ns[7].node);
try tree.replace(&ns[7].node, &dup.node);
var num: *testNumber = undefined;
num = testGetNumber(tree.first().?);
while (num.node.next() != null) {
testing.expect(testGetNumber(num.node.next().?).value > num.value);
num = testGetNumber(num.node.next().?);
}
}
test "inserting and looking up" {
var tree: Tree = undefined;
tree.init(testCompare);
var number: testNumber = undefined;
number.value = 1000;
_ = tree.insert(&number.node);
var dup: testNumber = undefined;
// Assert that nodes with identical value fields find the same pointer
dup.value = 1000;
assert(tree.lookup(&dup.node) == &number.node);
// Assert that nodes with identical values do not clobber each other when inserted
_ = tree.insert(&dup.node);
assert(tree.lookup(&dup.node) == &number.node);
assert(tree.lookup(&number.node) != &dup.node);
assert(testGetNumber(tree.lookup(&dup.node).?).value == testGetNumber(&dup.node).value);
// Assert that looking up a non-existing value returns null
var non_existing_value: testNumber = undefined;
non_existing_value.value = 1234;
assert(tree.lookup(&non_existing_value.node) == null);
}
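test "replace requires equal keys" {
// Illustrative sketch, not part of the original suite: `replace` must refuse
// a node whose key compares unequal to the old one (error.NotEqual assumed
// to be the ReplaceError member referenced by `replace` above).
var tree: Tree = undefined;
tree.init(testCompare);
var old: testNumber = undefined;
old.value = 1;
var other: testNumber = undefined;
other.value = 2;
_ = tree.insert(&old.node);
testing.expectError(error.NotEqual, tree.replace(&old.node, &other.node));
}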
test "multiple inserts, followed by calling first and last" {
var tree: Tree = undefined;
tree.init(testCompare);
var zeroth: testNumber = undefined;
zeroth.value = 0;
var first: testNumber = undefined;
first.value = 1;
var second: testNumber = undefined;
second.value = 2;
var third: testNumber = undefined;
third.value = 3;
_ = tree.insert(&zeroth.node);
_ = tree.insert(&first.node);
_ = tree.insert(&second.node);
_ = tree.insert(&third.node);
assert(testGetNumber(tree.first().?).value == 0);
assert(testGetNumber(tree.last().?).value == 3);
var lookupNode: testNumber = undefined;
lookupNode.value = 3;
assert(tree.lookup(&lookupNode.node) == &third.node);
} | std/rb.zig |
const std = @import("std");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const expect = std.testing.expect;
/// Type-generic map implemented as an AVL tree.
pub fn Map(comptime Key: type, comptime Value: type, lessThan: fn (Key, Key) bool) type {
return struct {
const Self = @This();
_allocator: *Allocator,
_root: ?*_Node,
_size: usize,
/// Key+value pair.
const KeyValue = struct {
_key: Key,
_value: Value,
/// Obtain the key.
pub fn key(self: KeyValue) Key {
return self._key;
}
/// Obtain the value.
pub fn value(self: KeyValue) Value {
return self._value;
}
};
/// Internal tree node.
const _Node = struct {
_kv: KeyValue,
_parent: ?*_Node,
_left: ?*_Node,
_right: ?*_Node,
_balance: i2,
};
/// Map iterator.
const Iterator = struct {
_node: ?*_Node,
/// Return the current value and advance the iterator.
pub fn next(self: *Iterator) ?*KeyValue {
if (self._node == null) {
return null;
}
const res = &self._node.?._kv;
if (self._node.?._right) |right_node| {
var successor_node = right_node;
while (successor_node._left) |left_node| successor_node = left_node;
self._node = successor_node;
return res;
}
var child_node = self._node.?;
while (true) {
if (child_node._parent == null) {
self._node = null;
return res;
}
const parent_node = child_node._parent.?;
if (parent_node._left == child_node) {
self._node = parent_node;
return res;
}
child_node = parent_node;
}
}
/// Return whether the iterator is valid, i.e. it currently points to a valid node.
pub fn valid(self: Iterator) bool {
return self._node != null;
}
/// Obtain the current key.
pub fn key(self: Iterator) Key {
assert(self._node != null);
return self._node.?._kv._key;
}
/// Obtain the current value.
pub fn value(self: Iterator) Value {
assert(self._node != null);
return self._node.?._kv._value;
}
};
/// Construct a new Map.
pub fn init(allocator: *Allocator) Self {
return Self{
._allocator = allocator,
._root = null,
._size = 0,
};
}
/// Finalize the map.
pub fn deinit(self: *Self) void {
self.clear();
}
/// Clear the map by removing all its nodes.
pub fn clear(self: *Self) void {
if (self._root == null) {
assert(self._size == 0);
return;
}
// Deallocate all nodes.
var node = self._root.?;
while (true) {
// Perform post-order traversal.
if (node._left) |left_node| {
node = left_node;
continue;
}
if (node._right) |right_node| {
node = right_node;
continue;
}
// Disconnect the node from the parent.
const maybe_parent_node = node._parent;
if (maybe_parent_node) |parent_node| {
if (parent_node._left == node) {
parent_node._left = null;
} else {
parent_node._right = null;
}
}
// Destroy the node.
self._allocator.destroy(node);
// Continue the traversal.
if (maybe_parent_node) |parent_node| {
node = parent_node;
} else {
break;
}
}
self._root = null;
self._size = 0;
}
/// Insert a new key+value pair in the map. Upon successful completion, an iterator pointing
/// to the inserted pair is returned. Otherwise, an error is returned.
pub fn insert(self: *Self, key: Key, value: Value) !Iterator {
// Allocate a new node.
const node = try self._allocator.create(_Node);
node.* = _Node{
._kv = KeyValue{ ._key = key, ._value = value },
._parent = null,
._left = null,
._right = null,
._balance = 0,
};
// Find the insertion point.
if (self._root == null) {
self._root = node;
} else {
var insert_node = self._root.?;
while (true) {
if (lessThan(key, insert_node._kv._key)) {
if (insert_node._left) |left_node| {
insert_node = left_node;
continue;
}
// Insert the node and rebalance the tree.
insert_node._left = node;
node._parent = insert_node;
self._insertBalance(insert_node, -1);
break;
} else {
assert(lessThan(insert_node._kv._key, key));
if (insert_node._right) |right_node| {
insert_node = right_node;
continue;
}
insert_node._right = node;
node._parent = insert_node;
self._insertBalance(insert_node, 1);
break;
}
}
}
self._size += 1;
return Iterator{ ._node = node };
}
/// Remove the key+value pair pointed by a specified iterator from the map.
pub fn remove(self: *Self, iter: Iterator) void {
const node = iter._node.?;
const maybe_left_node = node._left;
const maybe_right_node = node._right;
const maybe_parent_node = node._parent;
if (maybe_left_node) |left_node| {
if (maybe_right_node) |right_node| {
// Find the successor and use it to replace the deleted node.
var successor_node = right_node;
while (successor_node._left) |successor_left_node| {
successor_node = successor_left_node;
}
const parent_node = node._parent;
const successor_parent_node = successor_node._parent.?;
const maybe_successor_right_node = successor_node._right;
if (successor_parent_node != node) {
assert(successor_parent_node._left == successor_node);
assert(successor_node._left == null);
successor_parent_node._left = maybe_successor_right_node;
if (maybe_successor_right_node) |successor_right_node| {
successor_right_node._parent = successor_parent_node;
}
}
successor_node._parent = parent_node;
successor_node._left = left_node;
left_node._parent = successor_node;
if (successor_parent_node != node) {
successor_node._right = right_node;
right_node._parent = successor_node;
}
successor_node._balance = node._balance;
if (node == self._root) {
self._root = successor_node;
} else if (maybe_parent_node.?._right == node) {
maybe_parent_node.?._right = successor_node;
} else {
maybe_parent_node.?._left = successor_node;
}
// Rebalance the tree.
if (successor_parent_node != node) {
self._removeBalance(successor_parent_node, 1);
} else {
self._removeBalance(successor_node, -1);
}
} else {
// Use the left child to replace the deleted node.
left_node._parent = maybe_parent_node;
if (node == self._root) {
self._root = left_node;
} else if (maybe_parent_node.?._right == node) {
maybe_parent_node.?._right = left_node;
} else {
maybe_parent_node.?._left = left_node;
}
self._removeBalance(left_node, 0);
}
} else {
if (maybe_right_node) |right_node| {
// Use the right child to replace the deleted node.
right_node._parent = maybe_parent_node;
if (node == self._root) {
self._root = right_node;
} else if (maybe_parent_node.?._right == node) {
maybe_parent_node.?._right = right_node;
} else {
maybe_parent_node.?._left = right_node;
}
self._removeBalance(right_node, 0);
} else {
// Remove the leaf node.
if (node == self._root) {
self._root = null;
} else if (maybe_parent_node.?._right == node) {
maybe_parent_node.?._right = null;
self._removeBalance(maybe_parent_node.?, -1);
} else {
maybe_parent_node.?._left = null;
self._removeBalance(maybe_parent_node.?, 1);
}
}
}
self._size -= 1;
self._allocator.destroy(node);
}
/// Find the key+value pair that matches a specified key. Upon successful completion, an
/// iterator pointing to the found pair is returned. Otherwise, an invalid iterator is
/// returned.
pub fn find(self: *const Self, key: Key) Iterator {
var maybe_node = self._root;
while (maybe_node) |node| {
if (lessThan(key, node._kv._key)) {
maybe_node = node._left;
} else if (lessThan(node._kv._key, key)) {
maybe_node = node._right;
} else {
return Iterator{ ._node = node };
}
}
return Iterator{ ._node = null };
}
/// Obtain an iterator to traverse the map.
pub fn iterator(self: *const Self) Iterator {
if (self._root == null) {
return Iterator{ ._node = null };
}
var node = self._root.?;
while (node._left) |left_node| {
node = left_node;
}
return Iterator{ ._node = node };
}
/// Return the number of key+value pairs currently inserted in the map.
pub fn count(self: *const Self) usize {
return self._size;
}
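/// Propagate a balance-factor change upward after an insertion. `balance` is
/// -1 if the new node was attached to the left of `insert_node`, +1 if to the
/// right. An insertion needs at most one (possibly double) rotation, so every
/// rebalancing branch returns immediately.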
fn _insertBalance(self: *Self, insert_node: *_Node, balance: i2) void {
var maybe_node: ?*_Node = insert_node;
var change_balance = balance;
while (maybe_node) |node| {
const new_balance = @as(i3, node._balance) + change_balance;
if (new_balance == 0) {
node._balance = 0;
return;
}
if (new_balance == -2) {
if (node._left.?._balance == -1) {
_ = self._rotateRight(node);
} else {
_ = self._rotateLeftRight(node);
}
return;
}
if (new_balance == 2) {
if (node._right.?._balance == 1) {
_ = self._rotateLeft(node);
} else {
_ = self._rotateRightLeft(node);
}
return;
}
node._balance = @intCast(i2, new_balance);
const maybe_parent_node = node._parent;
if (maybe_parent_node) |parent_node| {
change_balance = if (parent_node._left == node) -1 else 1;
}
maybe_node = maybe_parent_node;
}
}
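/// Propagate a balance-factor change upward after a removal. Unlike
/// insertion, a removal can shorten a subtree and trigger rotations at
/// several ancestors, so the walk continues until a subtree's height is
/// known to be unchanged.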
fn _removeBalance(self: *Self, remove_node: *_Node, balance: i2) void {
var maybe_node: ?*_Node = remove_node;
var change_balance = balance;
while (maybe_node) |node| {
const new_balance = @as(i3, node._balance) + change_balance;
var next_node: *_Node = undefined;
if (new_balance == 0) {
node._balance = 0;
next_node = node;
} else if (new_balance == -2) {
if (node._left.?._balance <= 0) {
next_node = self._rotateRight(node);
if (next_node._balance == 1) {
return;
}
} else {
next_node = self._rotateLeftRight(node);
}
} else if (new_balance == 2) {
if (node._right.?._balance >= 0) {
next_node = self._rotateLeft(node);
if (next_node._balance == -1) {
return;
}
} else {
next_node = self._rotateRightLeft(node);
}
} else {
node._balance = @intCast(i2, new_balance);
return;
}
const maybe_parent_node = next_node._parent;
if (maybe_parent_node) |parent_node| {
change_balance = if (parent_node._left == next_node) 1 else -1;
}
maybe_node = maybe_parent_node;
}
}
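/// Single left rotation: the right child becomes the subtree root and `node`
/// becomes its left child. Re-parents the moved subtree, fixes the root
/// pointer if needed, updates both balance factors, and returns the new
/// subtree root.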
fn _rotateLeft(self: *Self, node: *_Node) *_Node {
const right_node = node._right.?;
const maybe_right_left_node = right_node._left;
const maybe_parent_node = node._parent;
right_node._parent = maybe_parent_node;
right_node._left = node;
node._right = maybe_right_left_node;
node._parent = right_node;
if (maybe_right_left_node) |right_left_node| {
right_left_node._parent = node;
}
if (node == self._root) {
self._root = right_node;
} else if (maybe_parent_node.?._right == node) {
maybe_parent_node.?._right = right_node;
} else {
maybe_parent_node.?._left = right_node;
}
if (right_node._balance == 1) {
node._balance = 0;
right_node._balance = 0;
} else {
assert(right_node._balance == 0);
node._balance = 1;
right_node._balance = -1;
}
return right_node;
}
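/// Mirror of _rotateLeft: the left child becomes the subtree root and `node`
/// becomes its right child.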
fn _rotateRight(self: *Self, node: *_Node) *_Node {
const left_node = node._left.?;
const maybe_left_right_node = left_node._right;
const maybe_parent_node = node._parent;
left_node._parent = maybe_parent_node;
left_node._right = node;
node._parent = left_node;
node._left = maybe_left_right_node;
if (maybe_left_right_node) |left_right_node| {
left_right_node._parent = node;
}
if (node == self._root) {
self._root = left_node;
} else if (maybe_parent_node.?._left == node) {
maybe_parent_node.?._left = left_node;
} else {
maybe_parent_node.?._right = left_node;
}
if (left_node._balance == -1) {
node._balance = 0;
left_node._balance = 0;
} else {
assert(left_node._balance == 0);
node._balance = -1;
left_node._balance = 1;
}
return left_node;
}
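/// Double rotation for the left-right case: the left child's right child
/// becomes the new subtree root, with the old left child and `node` as its
/// children. Balance factors are restored from the pivot's old balance.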
fn _rotateLeftRight(self: *Self, node: *_Node) *_Node {
const left_node = node._left.?;
const left_right_node = left_node._right.?;
const maybe_parent_node = node._parent;
const maybe_left_right_right_node = left_right_node._right;
const maybe_left_right_left_node = left_right_node._left;
left_right_node._parent = maybe_parent_node;
left_right_node._left = left_node;
left_right_node._right = node;
left_node._parent = left_right_node;
left_node._right = maybe_left_right_left_node;
node._parent = left_right_node;
node._left = maybe_left_right_right_node;
if (maybe_left_right_right_node) |left_right_right_node| {
left_right_right_node._parent = node;
}
if (maybe_left_right_left_node) |left_right_left_node| {
left_right_left_node._parent = left_node;
}
if (node == self._root) {
self._root = left_right_node;
} else if (maybe_parent_node.?._left == node) {
maybe_parent_node.?._left = left_right_node;
} else {
maybe_parent_node.?._right = left_right_node;
}
if (left_right_node._balance == 1) {
node._balance = 0;
left_node._balance = -1;
} else if (left_right_node._balance == 0) {
node._balance = 0;
left_node._balance = 0;
} else {
assert(left_right_node._balance == -1);
node._balance = 1;
left_node._balance = 0;
}
left_right_node._balance = 0;
return left_right_node;
}
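/// Mirror of _rotateLeftRight, handling the right-left case.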
fn _rotateRightLeft(self: *Self, node: *_Node) *_Node {
const right_node = node._right.?;
const right_left_node = right_node._left.?;
const maybe_parent_node = node._parent;
const maybe_right_left_left_node = right_left_node._left;
const maybe_right_left_right_node = right_left_node._right;
right_left_node._parent = maybe_parent_node;
right_left_node._right = right_node;
right_left_node._left = node;
right_node._parent = right_left_node;
right_node._left = maybe_right_left_right_node;
node._parent = right_left_node;
node._right = maybe_right_left_left_node;
if (maybe_right_left_left_node) |right_left_left_node| {
right_left_left_node._parent = node;
}
if (maybe_right_left_right_node) |right_left_right_node| {
right_left_right_node._parent = right_node;
}
if (node == self._root) {
self._root = right_left_node;
} else if (maybe_parent_node.?._right == node) {
maybe_parent_node.?._right = right_left_node;
} else {
maybe_parent_node.?._left = right_left_node;
}
if (right_left_node._balance == -1) {
node._balance = 0;
right_node._balance = 1;
} else if (right_left_node._balance == 0) {
node._balance = 0;
right_node._balance = 0;
} else {
assert(right_left_node._balance == 1);
node._balance = -1;
right_node._balance = 0;
}
right_left_node._balance = 0;
return right_left_node;
}
};
}
/// Instantiate a less-than function for a given type.
pub fn getLessThanFn(comptime T: type) fn (T, T) bool {
return struct {
fn lessThan(lhs: T, rhs: T) bool {
switch (@typeInfo(T)) {
.Pointer => |ptr_info| switch (ptr_info.size) {
.One => {
return @ptrToInt(lhs) < @ptrToInt(rhs);
},
.Slice => {
return mem.lessThan(ptr_info.child, lhs, rhs);
},
else => {},
},
else => {},
}
return lhs < rhs;
}
}.lessThan;
}
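// Example instantiations used by the tests below: a string map keyed by i32,
// and a set expressed as a map with a zero-sized void value.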
const IntToStrMap = Map(i32, []const u8, getLessThanFn(i32));
const IntSet = Map(i32, void, getLessThanFn(i32));
test "insert - left rotation" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
expect(node0._kv._key == 0);
expect(mem.eql(u8, node0._kv._value, "0"));
expect(ismap._root == node0);
expect(ismap._size == 1);
expect(node0._parent == null);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
expect(node1._kv._key == 1);
expect(mem.eql(u8, node1._kv._value, "1"));
expect(ismap._root == node0);
expect(ismap._size == 2);
expect(node0._parent == null);
expect(node0._left == null);
expect(node0._right == node1);
expect(node0._balance == 1);
expect(node1._parent == node0);
expect(node1._left == null);
expect(node1._right == null);
expect(node1._balance == 0);
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
expect(node2._kv._key == 2);
expect(mem.eql(u8, node2._kv._value, "2"));
expect(ismap._root == node1);
expect(ismap._size == 3);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node2);
expect(node1._balance == 0);
expect(node2._parent == node1);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
}
test "insert - right rotation" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
expect(node2._kv._key == 2);
expect(mem.eql(u8, node2._kv._value, "2"));
expect(ismap._root == node2);
expect(ismap._size == 1);
expect(node2._parent == null);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
expect(node1._kv._key == 1);
expect(mem.eql(u8, node1._kv._value, "1"));
expect(ismap._root == node2);
expect(ismap._size == 2);
expect(node1._parent == node2);
expect(node1._left == null);
expect(node1._right == null);
expect(node1._balance == 0);
expect(node2._parent == null);
expect(node2._left == node1);
expect(node2._right == null);
expect(node2._balance == -1);
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
expect(node0._kv._key == 0);
expect(mem.eql(u8, node0._kv._value, "0"));
expect(ismap._root == node1);
expect(ismap._size == 3);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node2);
expect(node1._balance == 0);
expect(node2._parent == node1);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
}
test "insert - left right rotation" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
expect(node2._kv._key == 2);
expect(mem.eql(u8, node2._kv._value, "2"));
expect(ismap._root == node2);
expect(ismap._size == 1);
expect(node2._parent == null);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
expect(node0._kv._key == 0);
expect(mem.eql(u8, node0._kv._value, "0"));
expect(ismap._root == node2);
expect(ismap._size == 2);
expect(node0._parent == node2);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node2._parent == null);
expect(node2._left == node0);
expect(node2._right == null);
expect(node2._balance == -1);
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
expect(node1._kv._key == 1);
expect(mem.eql(u8, node1._kv._value, "1"));
expect(ismap._root == node1);
expect(ismap._size == 3);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node2);
expect(node1._balance == 0);
expect(node2._parent == node1);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
}
test "insert - right left rotation" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
expect(node0._kv._key == 0);
expect(mem.eql(u8, node0._kv._value, "0"));
expect(ismap._root == node0);
expect(ismap._size == 1);
expect(node0._parent == null);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
expect(node2._kv._key == 2);
expect(mem.eql(u8, node2._kv._value, "2"));
expect(ismap._root == node0);
expect(ismap._size == 2);
expect(node0._parent == null);
expect(node0._left == null);
expect(node0._right == node2);
expect(node0._balance == 1);
expect(node2._parent == node0);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
expect(node1._kv._key == 1);
expect(mem.eql(u8, node1._kv._value, "1"));
expect(ismap._root == node1);
expect(ismap._size == 3);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node2);
expect(node1._balance == 0);
expect(node2._parent == node1);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
}
test "remove - 2 children - immediate successor" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
const iter3 = try ismap.insert(3, "3");
const node3 = iter3._node.?;
expect(ismap._root == node1);
expect(ismap._size == 4);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node2);
expect(node1._balance == 1);
expect(node2._parent == node1);
expect(node2._left == null);
expect(node2._right == node3);
expect(node2._balance == 1);
expect(node3._parent == node2);
expect(node3._left == null);
expect(node3._right == null);
expect(node3._balance == 0);
ismap.remove(iter1);
expect(ismap._root == node2);
expect(ismap._size == 3);
expect(node0._parent == node2);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node2._parent == null);
expect(node2._left == node0);
expect(node2._right == node3);
expect(node2._balance == 0);
expect(node3._parent == node2);
expect(node3._left == null);
expect(node3._right == null);
expect(node3._balance == 0);
}
test "remove - 2 children - non-immediate successor" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
const iter3 = try ismap.insert(3, "3");
const node3 = iter3._node.?;
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
expect(ismap._root == node1);
expect(ismap._size == 4);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node3);
expect(node1._balance == 1);
expect(node2._parent == node3);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
expect(node3._parent == node1);
expect(node3._left == node2);
expect(node3._right == null);
expect(node3._balance == -1);
ismap.remove(iter1);
expect(ismap._root == node2);
expect(ismap._size == 3);
expect(node0._parent == node2);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node2._parent == null);
expect(node2._left == node0);
expect(node2._right == node3);
expect(node2._balance == 0);
expect(node3._parent == node2);
expect(node3._left == null);
expect(node3._right == null);
expect(node3._balance == 0);
}
test "remove - 1 child - left" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
expect(ismap._root == node1);
expect(ismap._size == 2);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == null);
expect(node1._balance == -1);
ismap.remove(iter1);
expect(ismap._root == node0);
expect(ismap._size == 1);
expect(node0._parent == null);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
}
test "remove - 1 child - right" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
expect(ismap._root == node0);
expect(ismap._size == 2);
expect(node0._parent == null);
expect(node0._left == null);
expect(node0._right == node1);
expect(node0._balance == 1);
expect(node1._parent == node0);
expect(node1._left == null);
expect(node1._right == null);
expect(node1._balance == 0);
ismap.remove(iter0);
expect(ismap._root == node1);
expect(ismap._size == 1);
expect(node1._parent == null);
expect(node1._left == null);
expect(node1._right == null);
expect(node1._balance == 0);
}
test "remove - 0 children" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
expect(ismap._root == node0);
expect(ismap._size == 1);
expect(node0._parent == null);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
ismap.remove(iter0);
expect(ismap._root == null);
expect(ismap._size == 0);
}
test "remove - rebalance - new=-2, left=-1" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
const iter3 = try ismap.insert(3, "3");
const node3 = iter3._node.?;
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
expect(ismap._root == node2);
expect(ismap._size == 4);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == node2);
expect(node1._left == node0);
expect(node1._right == null);
expect(node1._balance == -1);
expect(node2._parent == null);
expect(node2._left == node1);
expect(node2._right == node3);
expect(node2._balance == -1);
expect(node3._parent == node2);
expect(node3._left == null);
expect(node3._right == null);
expect(node3._balance == 0);
ismap.remove(iter3);
expect(ismap._root == node1);
expect(ismap._size == 3);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node2);
expect(node1._balance == 0);
expect(node2._parent == node1);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
}
test "remove - rebalance - new=-2, left=0" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter3 = try ismap.insert(3, "3");
const node3 = iter3._node.?;
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
const iter4 = try ismap.insert(4, "4");
const node4 = iter4._node.?;
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
expect(ismap._root == node3);
expect(ismap._size == 5);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == node3);
expect(node1._left == node0);
expect(node1._right == node2);
expect(node1._balance == 0);
expect(node2._parent == node1);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
expect(node3._parent == null);
expect(node3._left == node1);
expect(node3._right == node4);
expect(node3._balance == -1);
expect(node4._parent == node3);
expect(node4._left == null);
expect(node4._right == null);
expect(node4._balance == 0);
ismap.remove(iter4);
expect(ismap._root == node1);
expect(ismap._size == 4);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node3);
expect(node1._balance == 1);
expect(node2._parent == node3);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
expect(node3._parent == node1);
expect(node3._left == node2);
expect(node3._right == null);
expect(node3._balance == -1);
}
test "remove - rebalance - new=-2, left=1" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
const iter3 = try ismap.insert(3, "3");
const node3 = iter3._node.?;
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
expect(ismap._root == node2);
expect(ismap._size == 4);
expect(node0._parent == node2);
expect(node0._left == null);
expect(node0._right == node1);
expect(node0._balance == 1);
expect(node1._parent == node0);
expect(node1._left == null);
expect(node1._right == null);
expect(node1._balance == 0);
expect(node2._parent == null);
expect(node2._left == node0);
expect(node2._right == node3);
expect(node2._balance == -1);
expect(node3._parent == node2);
expect(node3._left == null);
expect(node3._right == null);
expect(node3._balance == 0);
ismap.remove(iter3);
expect(ismap._root == node1);
expect(ismap._size == 3);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node2);
expect(node1._balance == 0);
expect(node2._parent == node1);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
}
test "remove - rebalance - new=2, right=1" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
const iter3 = try ismap.insert(3, "3");
const node3 = iter3._node.?;
expect(ismap._root == node1);
expect(ismap._size == 4);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node2);
expect(node1._balance == 1);
expect(node2._parent == node1);
expect(node2._left == null);
expect(node2._right == node3);
expect(node2._balance == 1);
expect(node3._parent == node2);
expect(node3._left == null);
expect(node3._right == null);
expect(node3._balance == 0);
ismap.remove(iter0);
expect(ismap._root == node2);
expect(ismap._size == 3);
expect(node1._parent == node2);
expect(node1._left == null);
expect(node1._right == null);
expect(node1._balance == 0);
expect(node2._parent == null);
expect(node2._left == node1);
expect(node2._right == node3);
expect(node2._balance == 0);
expect(node3._parent == node2);
expect(node3._left == null);
expect(node3._right == null);
expect(node3._balance == 0);
}
test "remove - rebalance - new=2, right=0" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
const iter3 = try ismap.insert(3, "3");
const node3 = iter3._node.?;
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
const iter4 = try ismap.insert(4, "4");
const node4 = iter4._node.?;
expect(ismap._root == node1);
expect(ismap._size == 5);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node3);
expect(node1._balance == 1);
expect(node2._parent == node3);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
expect(node3._parent == node1);
expect(node3._left == node2);
expect(node3._right == node4);
expect(node3._balance == 0);
expect(node4._parent == node3);
expect(node4._left == null);
expect(node4._right == null);
expect(node4._balance == 0);
ismap.remove(iter0);
expect(ismap._root == node3);
expect(ismap._size == 4);
expect(node1._parent == node3);
expect(node1._left == null);
expect(node1._right == node2);
expect(node1._balance == 1);
expect(node2._parent == node1);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
expect(node3._parent == null);
expect(node3._left == node1);
expect(node3._right == node4);
expect(node3._balance == -1);
expect(node4._parent == node3);
expect(node4._left == null);
expect(node4._right == null);
expect(node4._balance == 0);
}
test "remove - rebalance - new=2, right=-1" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
const iter3 = try ismap.insert(3, "3");
const node3 = iter3._node.?;
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
expect(ismap._root == node1);
expect(ismap._size == 4);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node3);
expect(node1._balance == 1);
expect(node2._parent == node3);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
expect(node3._parent == node1);
expect(node3._left == node2);
expect(node3._right == null);
expect(node3._balance == -1);
ismap.remove(iter0);
expect(ismap._root == node2);
expect(ismap._size == 3);
expect(node1._parent == node2);
expect(node1._left == null);
expect(node1._right == null);
expect(node1._balance == 0);
expect(node2._parent == null);
expect(node2._left == node1);
expect(node2._right == node3);
expect(node2._balance == 0);
expect(node3._parent == node2);
expect(node3._left == null);
expect(node3._right == null);
expect(node3._balance == 0);
}
test "remove - rebalance - new=-1" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
expect(ismap._root == node1);
expect(ismap._size == 3);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node2);
expect(node1._balance == 0);
expect(node2._parent == node1);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
ismap.remove(iter2);
expect(ismap._root == node1);
expect(ismap._size == 2);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == null);
expect(node1._balance == -1);
}
test "remove - rebalance - new=1" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
expect(ismap._root == node1);
expect(ismap._size == 3);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == null);
expect(node1._left == node0);
expect(node1._right == node2);
expect(node1._balance == 0);
expect(node2._parent == node1);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
ismap.remove(iter0);
expect(ismap._root == node1);
expect(ismap._size == 2);
expect(node1._parent == null);
expect(node1._left == null);
expect(node1._right == node2);
expect(node1._balance == 1);
expect(node2._parent == node1);
expect(node2._left == null);
expect(node2._right == null);
expect(node2._balance == 0);
}
test "remove - rebalance - new=0" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
const iter1 = try ismap.insert(1, "1");
const node1 = iter1._node.?;
const iter3 = try ismap.insert(3, "3");
const node3 = iter3._node.?;
const iter0 = try ismap.insert(0, "0");
const node0 = iter0._node.?;
expect(ismap._root == node2);
expect(ismap._size == 4);
expect(node0._parent == node1);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
expect(node1._parent == node2);
expect(node1._left == node0);
expect(node1._right == null);
expect(node1._balance == -1);
expect(node2._parent == null);
expect(node2._left == node1);
expect(node2._right == node3);
expect(node2._balance == -1);
expect(node3._parent == node2);
expect(node3._left == null);
expect(node3._right == null);
expect(node3._balance == 0);
ismap.remove(iter0);
expect(ismap._root == node2);
expect(ismap._size == 3);
expect(node1._parent == node2);
expect(node1._left == null);
expect(node1._right == null);
expect(node1._balance == 0);
expect(node2._parent == null);
expect(node2._left == node1);
expect(node2._right == node3);
expect(node2._balance == 0);
expect(node3._parent == node2);
expect(node3._left == null);
expect(node3._right == null);
expect(node3._balance == 0);
}
test "iterate" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
var iter: IntToStrMap.Iterator = undefined;
var maybe_kv: ?*IntToStrMap.KeyValue = null;
iter = ismap.iterator();
expect(!iter.valid());
maybe_kv = iter.next();
expect(maybe_kv == null);
expect(!iter.valid());
_ = try ismap.insert(3, "3");
iter = ismap.iterator();
expect(iter.valid());
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 3);
expect(!iter.valid());
maybe_kv = iter.next();
expect(maybe_kv == null);
expect(!iter.valid());
_ = try ismap.insert(5, "5");
iter = ismap.iterator();
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 3);
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 5);
maybe_kv = iter.next();
expect(maybe_kv == null);
_ = try ismap.insert(1, "1");
iter = ismap.iterator();
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 1);
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 3);
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 5);
maybe_kv = iter.next();
expect(maybe_kv == null);
_ = try ismap.insert(2, "2");
iter = ismap.iterator();
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 1);
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 2);
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 3);
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 5);
maybe_kv = iter.next();
expect(maybe_kv == null);
_ = try ismap.insert(4, "4");
iter = ismap.iterator();
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 1);
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 2);
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 3);
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 4);
maybe_kv = iter.next();
expect(maybe_kv != null);
expect(maybe_kv.?.key() == 5);
maybe_kv = iter.next();
expect(maybe_kv == null);
}
test "find" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
_ = try ismap.insert(3, "3");
_ = try ismap.insert(5, "5");
_ = try ismap.insert(1, "1");
const iter2 = try ismap.insert(2, "2");
const node2 = iter2._node.?;
_ = try ismap.insert(4, "4");
const iter = ismap.find(2);
expect(iter._node == node2);
expect(iter.valid());
expect(iter.key() == 2);
expect(mem.eql(u8, iter.value(), "2"));
}
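test "find and remove" {
// Illustrative sketch, not part of the original suite: erase an entry
// located with `find` and confirm count and lookup reflect the removal.
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var ismap = IntToStrMap.init(&arena_allocator.allocator);
defer ismap.deinit();
_ = try ismap.insert(1, "1");
_ = try ismap.insert(2, "2");
const iter = ismap.find(1);
expect(iter.valid());
ismap.remove(iter);
expect(ismap.count() == 1);
expect(!ismap.find(1).valid());
}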
test "set" {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_allocator.deinit();
var iset = IntSet.init(&arena_allocator.allocator);
defer iset.deinit();
const iter0 = try iset.insert(0, {});
const node0 = iter0._node.?;
expect(node0._kv._key == 0);
expect(iset._root == node0);
expect(iset._size == 1);
expect(node0._parent == null);
expect(node0._left == null);
expect(node0._right == null);
expect(node0._balance == 0);
} | src/avl.zig |
const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const ir = @import("ir.zig");
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const TypedValue = @import("TypedValue.zig");
const link = @import("link.zig");
const Module = @import("Module.zig");
const ErrorMsg = Module.ErrorMsg;
const Target = std.Target;
const Allocator = mem.Allocator;
const trace = @import("tracy.zig").trace;
/// The codegen-related data that is stored in `ir.Inst.Block` instructions.
pub const BlockData = struct {
relocs: std.ArrayListUnmanaged(Reloc) = .{},
};
pub const Reloc = union(enum) {
/// The value is an offset into the `Function` `code` from the beginning.
/// To perform the reloc, write a 32-bit signed little-endian integer
/// representing a relative jump, based on the address following the reloc.
rel32: usize,
};
pub const Result = union(enum) {
/// The `code` parameter passed to `generateSymbol` has the value appended.
appended: void,
/// The value is available externally, `code` is unused.
externally_managed: []const u8,
fail: *Module.ErrorMsg,
};
pub fn generateSymbol(
bin_file: *link.File.Elf,
src: usize,
typed_value: TypedValue,
code: *std.ArrayList(u8),
) error{
OutOfMemory,
/// A Decl that this symbol depends on had a semantic analysis failure.
AnalysisFail,
}!Result {
const tracy = trace(@src());
defer tracy.end();
switch (typed_value.ty.zigTypeTag()) {
.Fn => {
const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;
const fn_type = module_fn.owner_decl.typed_value.most_recent.typed_value.ty;
const param_types = try bin_file.allocator.alloc(Type, fn_type.fnParamLen());
defer bin_file.allocator.free(param_types);
fn_type.fnParamTypes(param_types);
var mc_args = try bin_file.allocator.alloc(MCValue, param_types.len);
defer bin_file.allocator.free(mc_args);
var branch_stack = std.ArrayList(Function.Branch).init(bin_file.allocator);
defer {
assert(branch_stack.items.len == 1);
branch_stack.items[0].deinit(bin_file.allocator);
branch_stack.deinit();
}
const branch = try branch_stack.addOne();
branch.* = .{};
var function = Function{
.gpa = bin_file.allocator,
.target = &bin_file.options.target,
.bin_file = bin_file,
.mod_fn = module_fn,
.code = code,
.err_msg = null,
.args = mc_args,
.branch_stack = &branch_stack,
.src = src,
};
const cc = fn_type.fnCallingConvention();
branch.max_end_stack = function.resolveParameters(src, cc, param_types, mc_args) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
else => |e| return e,
};
function.gen() catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
else => |e| return e,
};
if (function.err_msg) |em| {
return Result{ .fail = em };
} else {
return Result{ .appended = {} };
}
},
.Array => {
if (typed_value.val.cast(Value.Payload.Bytes)) |payload| {
if (typed_value.ty.arraySentinel()) |sentinel| {
try code.ensureCapacity(code.items.len + payload.data.len + 1);
code.appendSliceAssumeCapacity(payload.data);
const prev_len = code.items.len;
switch (try generateSymbol(bin_file, src, .{
.ty = typed_value.ty.elemType(),
.val = sentinel,
}, code)) {
.appended => return Result{ .appended = {} },
.externally_managed => |slice| {
code.appendSliceAssumeCapacity(slice);
return Result{ .appended = {} };
},
.fail => |em| return Result{ .fail = em },
}
} else {
return Result{ .externally_managed = payload.data };
}
}
return Result{
.fail = try ErrorMsg.create(
bin_file.allocator,
src,
"TODO implement generateSymbol for more kinds of arrays",
.{},
),
};
},
.Pointer => {
if (typed_value.val.cast(Value.Payload.DeclRef)) |payload| {
const decl = payload.decl;
if (decl.analysis != .complete) return error.AnalysisFail;
assert(decl.link.local_sym_index != 0);
// TODO handle the dependency of this symbol on the decl's vaddr.
// If the decl changes vaddr, then this symbol needs to get regenerated.
const vaddr = bin_file.local_symbols.items[decl.link.local_sym_index].st_value;
const endian = bin_file.options.target.cpu.arch.endian();
switch (bin_file.ptr_width) {
.p32 => {
try code.resize(4);
mem.writeInt(u32, code.items[0..4], @intCast(u32, vaddr), endian);
},
.p64 => {
try code.resize(8);
mem.writeInt(u64, code.items[0..8], vaddr, endian);
},
}
return Result{ .appended = {} };
}
return Result{
.fail = try ErrorMsg.create(
bin_file.allocator,
src,
"TODO implement generateSymbol for pointer {}",
.{typed_value.val},
),
};
},
.Int => {
const info = typed_value.ty.intInfo(bin_file.options.target);
if (info.bits == 8 and !info.signed) {
const x = typed_value.val.toUnsignedInt();
try code.append(@intCast(u8, x));
return Result{ .appended = {} };
}
return Result{
.fail = try ErrorMsg.create(
bin_file.allocator,
src,
"TODO implement generateSymbol for int type '{}'",
.{typed_value.ty},
),
};
},
else => |t| {
return Result{
.fail = try ErrorMsg.create(
bin_file.allocator,
src,
"TODO implement generateSymbol for type '{}'",
.{@tagName(t)},
),
};
},
}
}
const InnerError = error{
OutOfMemory,
CodegenFail,
};
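/// Describes where a value lives during codegen: nowhere (none/dead/unreach),
/// as an immediate, embedded in the emitted code, in a register, at a fixed
/// memory address, at a stack offset, or implicitly in the CPU compare flags.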
const MCValue = union(enum) {
/// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc.
none,
/// Control flow will not allow this value to be observed.
unreach,
/// No more references to this value remain.
dead,
/// A pointer-sized integer that fits in a register.
immediate: u64,
/// The constant was emitted into the code, at this offset.
embedded_in_code: usize,
/// The value is in a target-specific register. The value can
/// be cast with @intToEnum to the respective Reg enum.
register: usize,
/// The value is in memory at a hard-coded address.
memory: u64,
/// The value is one of the stack variables.
stack_offset: u64,
/// The value is in the compare flags assuming an unsigned operation,
/// with this operator applied on top of it.
compare_flags_unsigned: std.math.CompareOperator,
/// The value is in the compare flags assuming a signed operation,
/// with this operator applied on top of it.
compare_flags_signed: std.math.CompareOperator,
fn isMemory(mcv: MCValue) bool {
return switch (mcv) {
.embedded_in_code, .memory, .stack_offset => true,
else => false,
};
}
fn isImmediate(mcv: MCValue) bool {
return switch (mcv) {
.immediate => true,
else => false,
};
}
fn isMutable(mcv: MCValue) bool {
return switch (mcv) {
.none => unreachable,
.unreach => unreachable,
.dead => unreachable,
.immediate,
.embedded_in_code,
.memory,
.compare_flags_unsigned,
.compare_flags_signed,
=> false,
.register,
.stack_offset,
=> true,
};
}
};
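/// All per-function state for machine code generation. One instance is
/// created in `generateSymbol` for each `.Fn` value and threaded through the
/// `gen*` helpers below.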
const Function = struct {
gpa: *Allocator,
bin_file: *link.File.Elf,
target: *const std.Target,
mod_fn: *const Module.Fn,
code: *std.ArrayList(u8),
err_msg: ?*ErrorMsg,
args: []MCValue,
src: usize,
/// Whenever there is a runtime branch, we push a Branch onto this stack,
/// and pop it off when the runtime branch joins. This provides an "overlay"
/// of the table of mappings from instructions to `MCValue` from within the branch.
/// This way we can modify the `MCValue` for an instruction in different ways
/// within different branches. Special consideration is needed when a branch
/// joins with its parent, to make sure all instructions have the same MCValue
/// across each runtime branch upon joining.
branch_stack: *std.ArrayList(Branch),
const Branch = struct {
inst_table: std.AutoHashMapUnmanaged(*ir.Inst, MCValue) = .{},
/// The key is an enum value of an arch-specific register.
registers: std.AutoHashMapUnmanaged(usize, RegisterAllocation) = .{},
/// Maps offset to what is stored there.
stack: std.AutoHashMapUnmanaged(usize, StackAllocation) = .{},
/// Offset from the stack base, representing the end of the stack frame.
max_end_stack: u32 = 0,
/// Represents the current end stack offset. If there is no existing slot
/// to place a new stack allocation, it goes here, and then bumps `max_end_stack`.
next_stack_offset: u32 = 0,
fn deinit(self: *Branch, gpa: *Allocator) void {
self.inst_table.deinit(gpa);
self.registers.deinit(gpa);
self.stack.deinit(gpa);
self.* = undefined;
}
};
const RegisterAllocation = struct {
inst: *ir.Inst,
};
const StackAllocation = struct {
inst: *ir.Inst,
size: u32,
};
fn gen(self: *Function) !void {
switch (self.target.cpu.arch) {
.arm => return self.genArch(.arm),
.armeb => return self.genArch(.armeb),
.aarch64 => return self.genArch(.aarch64),
.aarch64_be => return self.genArch(.aarch64_be),
.aarch64_32 => return self.genArch(.aarch64_32),
.arc => return self.genArch(.arc),
.avr => return self.genArch(.avr),
.bpfel => return self.genArch(.bpfel),
.bpfeb => return self.genArch(.bpfeb),
.hexagon => return self.genArch(.hexagon),
.mips => return self.genArch(.mips),
.mipsel => return self.genArch(.mipsel),
.mips64 => return self.genArch(.mips64),
.mips64el => return self.genArch(.mips64el),
.msp430 => return self.genArch(.msp430),
.powerpc => return self.genArch(.powerpc),
.powerpc64 => return self.genArch(.powerpc64),
.powerpc64le => return self.genArch(.powerpc64le),
.r600 => return self.genArch(.r600),
.amdgcn => return self.genArch(.amdgcn),
.riscv32 => return self.genArch(.riscv32),
.riscv64 => return self.genArch(.riscv64),
.sparc => return self.genArch(.sparc),
.sparcv9 => return self.genArch(.sparcv9),
.sparcel => return self.genArch(.sparcel),
.s390x => return self.genArch(.s390x),
.tce => return self.genArch(.tce),
.tcele => return self.genArch(.tcele),
.thumb => return self.genArch(.thumb),
.thumbeb => return self.genArch(.thumbeb),
.i386 => return self.genArch(.i386),
.x86_64 => return self.genArch(.x86_64),
.xcore => return self.genArch(.xcore),
.nvptx => return self.genArch(.nvptx),
.nvptx64 => return self.genArch(.nvptx64),
.le32 => return self.genArch(.le32),
.le64 => return self.genArch(.le64),
.amdil => return self.genArch(.amdil),
.amdil64 => return self.genArch(.amdil64),
.hsail => return self.genArch(.hsail),
.hsail64 => return self.genArch(.hsail64),
.spir => return self.genArch(.spir),
.spir64 => return self.genArch(.spir64),
.kalimba => return self.genArch(.kalimba),
.shave => return self.genArch(.shave),
.lanai => return self.genArch(.lanai),
.wasm32 => return self.genArch(.wasm32),
.wasm64 => return self.genArch(.wasm64),
.renderscript32 => return self.genArch(.renderscript32),
.renderscript64 => return self.genArch(.renderscript64),
.ve => return self.genArch(.ve),
}
}
fn genArch(self: *Function, comptime arch: std.Target.Cpu.Arch) !void {
try self.code.ensureCapacity(self.code.items.len + 11);
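        // TODO the prologue below is x86_64-specific even though this function
        // is instantiated for every architecture; other targets currently bail
        // out with "TODO implement ..." errors from the per-instruction codegen.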
// push rbp
// mov rbp, rsp
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x55, 0x48, 0x89, 0xe5 });
// sub rsp, x
const stack_end = self.branch_stack.items[0].max_end_stack;
if (stack_end > std.math.maxInt(i32)) {
            return self.fail(self.src, "stack frame size does not fit in a signed 32-bit immediate", .{});
        } else if (stack_end > std.math.maxInt(i8)) {
            // 48 81 ec xx xx xx xx    sub rsp, imm32
            self.code.appendSliceAssumeCapacity(&[_]u8{ 0x48, 0x81, 0xec });
            const x = @intCast(u32, stack_end);
            mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), x);
        } else if (stack_end != 0) {
            // 48 83 ec xx             sub rsp, imm8
            const x = @intCast(u8, stack_end);
            self.code.appendSliceAssumeCapacity(&[_]u8{ 0x48, 0x83, 0xec, x });
}
try self.genBody(self.mod_fn.analysis.success, arch);
}
fn genBody(self: *Function, body: ir.Body, comptime arch: std.Target.Cpu.Arch) InnerError!void {
const inst_table = &self.branch_stack.items[0].inst_table;
for (body.instructions) |inst| {
const new_inst = try self.genFuncInst(inst, arch);
try inst_table.putNoClobber(self.gpa, inst, new_inst);
}
}
fn genFuncInst(self: *Function, inst: *ir.Inst, comptime arch: std.Target.Cpu.Arch) !MCValue {
switch (inst.tag) {
.add => return self.genAdd(inst.cast(ir.Inst.Add).?, arch),
.arg => return self.genArg(inst.cast(ir.Inst.Arg).?),
.assembly => return self.genAsm(inst.cast(ir.Inst.Assembly).?, arch),
.bitcast => return self.genBitCast(inst.cast(ir.Inst.BitCast).?),
.block => return self.genBlock(inst.cast(ir.Inst.Block).?, arch),
.br => return self.genBr(inst.cast(ir.Inst.Br).?, arch),
.breakpoint => return self.genBreakpoint(inst.src, arch),
.brvoid => return self.genBrVoid(inst.cast(ir.Inst.BrVoid).?, arch),
.call => return self.genCall(inst.cast(ir.Inst.Call).?, arch),
.cmp => return self.genCmp(inst.cast(ir.Inst.Cmp).?, arch),
.condbr => return self.genCondBr(inst.cast(ir.Inst.CondBr).?, arch),
.constant => unreachable, // excluded from function bodies
.isnonnull => return self.genIsNonNull(inst.cast(ir.Inst.IsNonNull).?, arch),
.isnull => return self.genIsNull(inst.cast(ir.Inst.IsNull).?, arch),
.ptrtoint => return self.genPtrToInt(inst.cast(ir.Inst.PtrToInt).?),
.ret => return self.genRet(inst.cast(ir.Inst.Ret).?, arch),
.retvoid => return self.genRetVoid(inst.cast(ir.Inst.RetVoid).?, arch),
.sub => return self.genSub(inst.cast(ir.Inst.Sub).?, arch),
.unreach => return MCValue{ .unreach = {} },
.not => return self.genNot(inst.cast(ir.Inst.Not).?, arch),
}
}
fn genNot(self: *Function, inst: *ir.Inst.Not, comptime arch: std.Target.Cpu.Arch) !MCValue {
// No side effects, so if it's unreferenced, do nothing.
if (inst.base.isUnused())
return MCValue.dead;
const operand = try self.resolveInst(inst.args.operand);
switch (operand) {
.dead => unreachable,
.unreach => unreachable,
.compare_flags_unsigned => |op| return MCValue{
.compare_flags_unsigned = switch (op) {
.gte => .lt,
.gt => .lte,
.neq => .eq,
.lt => .gte,
.lte => .gt,
.eq => .neq,
},
},
.compare_flags_signed => |op| return MCValue{
.compare_flags_signed = switch (op) {
.gte => .lt,
.gt => .lte,
.neq => .eq,
.lt => .gte,
.lte => .gt,
.eq => .neq,
},
},
else => {},
}
switch (arch) {
.x86_64 => {
var imm = ir.Inst.Constant{
.base = .{
.tag = .constant,
.deaths = 0,
.ty = inst.args.operand.ty,
.src = inst.args.operand.src,
},
.val = Value.initTag(.bool_true),
};
return try self.genX8664BinMath(&inst.base, inst.args.operand, &imm.base, 6, 0x30);
},
else => return self.fail(inst.base.src, "TODO implement NOT for {}", .{self.target.cpu.arch}),
}
}
fn genAdd(self: *Function, inst: *ir.Inst.Add, comptime arch: std.Target.Cpu.Arch) !MCValue {
// No side effects, so if it's unreferenced, do nothing.
if (inst.base.isUnused())
return MCValue.dead;
switch (arch) {
.x86_64 => {
return try self.genX8664BinMath(&inst.base, inst.args.lhs, inst.args.rhs, 0, 0x00);
},
else => return self.fail(inst.base.src, "TODO implement add for {}", .{self.target.cpu.arch}),
}
}
fn genSub(self: *Function, inst: *ir.Inst.Sub, comptime arch: std.Target.Cpu.Arch) !MCValue {
// No side effects, so if it's unreferenced, do nothing.
if (inst.base.isUnused())
return MCValue.dead;
switch (arch) {
.x86_64 => {
return try self.genX8664BinMath(&inst.base, inst.args.lhs, inst.args.rhs, 5, 0x28);
},
else => return self.fail(inst.base.src, "TODO implement sub for {}", .{self.target.cpu.arch}),
}
}
/// ADD, SUB, XOR, OR, AND
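    ///
    /// `opx` is the ModRM.reg opcode extension used by the 0x83/0x81 immediate
    /// forms, and `mr` is the base opcode of the "r/m, r" register form.
    /// Call sites pass: ADD opx=0 mr=0x00 (genAdd), SUB opx=5 mr=0x28 (genSub),
    /// XOR opx=6 mr=0x30 (genNot); genCmp uses the same encoding directly via
    /// genX8664BinMathCode with opx=7 mr=0x38.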
fn genX8664BinMath(self: *Function, inst: *ir.Inst, op_lhs: *ir.Inst, op_rhs: *ir.Inst, opx: u8, mr: u8) !MCValue {
try self.code.ensureCapacity(self.code.items.len + 8);
const lhs = try self.resolveInst(op_lhs);
const rhs = try self.resolveInst(op_rhs);
// There are 2 operands, destination and source.
// Either one, but not both, can be a memory operand.
// Source operand can be an immediate, 8 bits or 32 bits.
// So, if either one of the operands dies with this instruction, we can use it
// as the result MCValue.
var dst_mcv: MCValue = undefined;
var src_mcv: MCValue = undefined;
var src_inst: *ir.Inst = undefined;
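        // Note: the branches below sometimes pick RHS as the destination, which
        // yields `rhs OP lhs`. That is fine for the commutative ops (ADD, XOR,
        // OR, AND) but remains a latent TODO for SUB.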
if (inst.operandDies(0) and lhs.isMutable()) {
// LHS dies; use it as the destination.
// Both operands cannot be memory.
src_inst = op_rhs;
if (lhs.isMemory() and rhs.isMemory()) {
dst_mcv = try self.copyToNewRegister(op_lhs);
src_mcv = rhs;
} else {
dst_mcv = lhs;
src_mcv = rhs;
}
} else if (inst.operandDies(1) and rhs.isMutable()) {
// RHS dies; use it as the destination.
// Both operands cannot be memory.
src_inst = op_lhs;
if (lhs.isMemory() and rhs.isMemory()) {
dst_mcv = try self.copyToNewRegister(op_rhs);
src_mcv = lhs;
} else {
dst_mcv = rhs;
src_mcv = lhs;
}
} else {
if (lhs.isMemory()) {
dst_mcv = try self.copyToNewRegister(op_lhs);
src_mcv = rhs;
src_inst = op_rhs;
} else {
dst_mcv = try self.copyToNewRegister(op_rhs);
src_mcv = lhs;
src_inst = op_lhs;
}
}
// This instruction supports only signed 32-bit immediates at most. If the immediate
// value is larger than this, we put it in a register.
// A potential opportunity for future optimization here would be keeping track
// of the fact that the instruction is available both as an immediate
// and as a register.
switch (src_mcv) {
.immediate => |imm| {
if (imm > std.math.maxInt(u31)) {
src_mcv = try self.copyToNewRegister(src_inst);
}
},
else => {},
}
try self.genX8664BinMathCode(inst.src, dst_mcv, src_mcv, opx, mr);
return dst_mcv;
}
fn genX8664BinMathCode(self: *Function, src: usize, dst_mcv: MCValue, src_mcv: MCValue, opx: u8, mr: u8) !void {
switch (dst_mcv) {
.none => unreachable,
.dead, .unreach, .immediate => unreachable,
.compare_flags_unsigned => unreachable,
.compare_flags_signed => unreachable,
.register => |dst_reg_usize| {
const dst_reg = @intToEnum(Reg(.x86_64), @intCast(u8, dst_reg_usize));
switch (src_mcv) {
.none => unreachable,
.dead, .unreach => unreachable,
.register => |src_reg_usize| {
const src_reg = @intToEnum(Reg(.x86_64), @intCast(u8, src_reg_usize));
self.rex(.{ .b = dst_reg.isExtended(), .r = src_reg.isExtended(), .w = dst_reg.size() == 64 });
self.code.appendSliceAssumeCapacity(&[_]u8{ mr + 0x1, 0xC0 | (@as(u8, src_reg.id() & 0b111) << 3) | @as(u8, dst_reg.id() & 0b111) });
},
.immediate => |imm| {
const imm32 = @intCast(u31, imm); // This case must be handled before calling genX8664BinMathCode.
                        if (imm32 <= std.math.maxInt(u7)) {
                            // 83 /opx ib (sign-extended imm8 form)
self.rex(.{ .b = dst_reg.isExtended(), .w = dst_reg.size() == 64 });
self.code.appendSliceAssumeCapacity(&[_]u8{
0x83,
0xC0 | (opx << 3) | @truncate(u3, dst_reg.id()),
@intCast(u8, imm32),
});
                        } else {
                            // 81 /opx id (imm32 form)
                            // dst_reg is encoded in ModRM.rm, so its extension bit is REX.B, not REX.R.
                            self.rex(.{ .b = dst_reg.isExtended(), .w = dst_reg.size() == 64 });
self.code.appendSliceAssumeCapacity(&[_]u8{
0x81,
0xC0 | (opx << 3) | @truncate(u3, dst_reg.id()),
});
std.mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), imm32);
}
},
.embedded_in_code, .memory, .stack_offset => {
return self.fail(src, "TODO implement x86 ADD/SUB/CMP source memory", .{});
},
.compare_flags_unsigned => {
return self.fail(src, "TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{});
},
.compare_flags_signed => {
return self.fail(src, "TODO implement x86 ADD/SUB/CMP source compare flag (signed)", .{});
},
}
},
.embedded_in_code, .memory, .stack_offset => {
return self.fail(src, "TODO implement x86 ADD/SUB/CMP destination memory", .{});
},
}
}
fn genArg(self: *Function, inst: *ir.Inst.Arg) !MCValue {
return self.args[inst.args.index];
}
fn genBreakpoint(self: *Function, src: usize, comptime arch: std.Target.Cpu.Arch) !MCValue {
switch (arch) {
.i386, .x86_64 => {
try self.code.append(0xcc); // int3
},
else => return self.fail(src, "TODO implement @breakpoint() for {}", .{self.target.cpu.arch}),
}
return .none;
}
fn genCall(self: *Function, inst: *ir.Inst.Call, comptime arch: std.Target.Cpu.Arch) !MCValue {
const fn_ty = inst.args.func.ty;
const cc = fn_ty.fnCallingConvention();
const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
defer self.gpa.free(param_types);
fn_ty.fnParamTypes(param_types);
var mc_args = try self.gpa.alloc(MCValue, param_types.len);
defer self.gpa.free(mc_args);
const stack_byte_count = try self.resolveParameters(inst.base.src, cc, param_types, mc_args);
switch (arch) {
.x86_64 => {
for (mc_args) |mc_arg, arg_i| {
const arg = inst.args.args[arg_i];
const arg_mcv = try self.resolveInst(inst.args.args[arg_i]);
switch (mc_arg) {
.none => continue,
.register => |reg| {
try self.genSetReg(arg.src, arch, @intToEnum(Reg(arch), @intCast(u8, reg)), arg_mcv);
// TODO interact with the register allocator to mark the instruction as moved.
},
.stack_offset => {
// Here we need to emit instructions like this:
// mov qword ptr [rsp + stack_offset], x
return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{});
},
.immediate => unreachable,
.unreach => unreachable,
.dead => unreachable,
.embedded_in_code => unreachable,
.memory => unreachable,
.compare_flags_signed => unreachable,
.compare_flags_unsigned => unreachable,
}
}
if (inst.args.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
const got = &self.bin_file.program_headers.items[self.bin_file.phdr_got_index.?];
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.offset_table_index * ptr_bytes);
// ff 14 25 xx xx xx xx call [addr]
try self.code.ensureCapacity(self.code.items.len + 7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), got_addr);
} else {
return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
}
} else {
return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{});
}
},
else => return self.fail(inst.base.src, "TODO implement call for {}", .{self.target.cpu.arch}),
}
const return_type = fn_ty.fnReturnType();
switch (return_type.zigTypeTag()) {
.Void => return MCValue{ .none = {} },
.NoReturn => return MCValue{ .unreach = {} },
else => return self.fail(inst.base.src, "TODO implement fn call with non-void return value", .{}),
}
}
fn ret(self: *Function, src: usize, comptime arch: std.Target.Cpu.Arch, mcv: MCValue) !MCValue {
if (mcv != .none) {
return self.fail(src, "TODO implement return with non-void operand", .{});
}
switch (arch) {
.i386 => {
try self.code.append(0xc3); // ret
},
.x86_64 => {
try self.code.appendSlice(&[_]u8{
0x5d, // pop rbp
0xc3, // ret
});
},
else => return self.fail(src, "TODO implement return for {}", .{self.target.cpu.arch}),
}
return .unreach;
}
fn genRet(self: *Function, inst: *ir.Inst.Ret, comptime arch: std.Target.Cpu.Arch) !MCValue {
const operand = try self.resolveInst(inst.args.operand);
return self.ret(inst.base.src, arch, operand);
}
fn genRetVoid(self: *Function, inst: *ir.Inst.RetVoid, comptime arch: std.Target.Cpu.Arch) !MCValue {
return self.ret(inst.base.src, arch, .none);
}
fn genCmp(self: *Function, inst: *ir.Inst.Cmp, comptime arch: std.Target.Cpu.Arch) !MCValue {
// No side effects, so if it's unreferenced, do nothing.
if (inst.base.isUnused())
return MCValue.dead;
switch (arch) {
.x86_64 => {
try self.code.ensureCapacity(self.code.items.len + 8);
const lhs = try self.resolveInst(inst.args.lhs);
const rhs = try self.resolveInst(inst.args.rhs);
// There are 2 operands, destination and source.
// Either one, but not both, can be a memory operand.
// Source operand can be an immediate, 8 bits or 32 bits.
const dst_mcv = if (lhs.isImmediate() or (lhs.isMemory() and rhs.isMemory()))
try self.copyToNewRegister(inst.args.lhs)
else
lhs;
// This instruction supports only signed 32-bit immediates at most.
const src_mcv = try self.limitImmediateType(inst.args.rhs, i32);
try self.genX8664BinMathCode(inst.base.src, dst_mcv, src_mcv, 7, 0x38);
const info = inst.args.lhs.ty.intInfo(self.target.*);
if (info.signed) {
return MCValue{ .compare_flags_signed = inst.args.op };
} else {
return MCValue{ .compare_flags_unsigned = inst.args.op };
}
},
else => return self.fail(inst.base.src, "TODO implement cmp for {}", .{self.target.cpu.arch}),
}
}
fn genCondBr(self: *Function, inst: *ir.Inst.CondBr, comptime arch: std.Target.Cpu.Arch) !MCValue {
switch (arch) {
.x86_64 => {
try self.code.ensureCapacity(self.code.items.len + 6);
const cond = try self.resolveInst(inst.args.condition);
switch (cond) {
.compare_flags_signed => |cmp_op| {
// Here we map to the opposite opcode because the jump is to the false branch.
const opcode: u8 = switch (cmp_op) {
.gte => 0x8c,
.gt => 0x8e,
.neq => 0x84,
.lt => 0x8d,
.lte => 0x8f,
.eq => 0x85,
};
return self.genX86CondBr(inst, opcode, arch);
},
.compare_flags_unsigned => |cmp_op| {
// Here we map to the opposite opcode because the jump is to the false branch.
const opcode: u8 = switch (cmp_op) {
.gte => 0x82,
.gt => 0x86,
.neq => 0x84,
.lt => 0x83,
.lte => 0x87,
.eq => 0x85,
};
return self.genX86CondBr(inst, opcode, arch);
},
.register => |reg_usize| {
const reg = @intToEnum(Reg(arch), @intCast(u8, reg_usize));
                        // test reg, 1
                        // TODO detect al, ax, eax
                        // Reserve space for the test (up to 4 bytes with REX) plus
                        // the 6-byte conditional jump emitted by genX86CondBr below.
                        try self.code.ensureCapacity(self.code.items.len + 10);
self.rex(.{ .b = reg.isExtended(), .w = reg.size() == 64 });
self.code.appendSliceAssumeCapacity(&[_]u8{
0xf6,
@as(u8, 0xC0) | (0 << 3) | @truncate(u3, reg.id()),
0x01,
});
return self.genX86CondBr(inst, 0x84, arch);
},
else => return self.fail(inst.base.src, "TODO implement condbr {} when condition is {}", .{ self.target.cpu.arch, @tagName(cond) }),
}
},
else => return self.fail(inst.base.src, "TODO implement condbr for {}", .{self.target.cpu.arch}),
}
}
fn genX86CondBr(self: *Function, inst: *ir.Inst.CondBr, opcode: u8, comptime arch: std.Target.Cpu.Arch) !MCValue {
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode });
const reloc = Reloc{ .rel32 = self.code.items.len };
self.code.items.len += 4;
try self.genBody(inst.args.true_body, arch);
try self.performReloc(inst.base.src, reloc);
try self.genBody(inst.args.false_body, arch);
return MCValue.unreach;
}
fn genIsNull(self: *Function, inst: *ir.Inst.IsNull, comptime arch: std.Target.Cpu.Arch) !MCValue {
switch (arch) {
else => return self.fail(inst.base.src, "TODO implement isnull for {}", .{self.target.cpu.arch}),
}
}
fn genIsNonNull(self: *Function, inst: *ir.Inst.IsNonNull, comptime arch: std.Target.Cpu.Arch) !MCValue {
// Here you can specialize this instruction if it makes sense to, otherwise the default
// will call genIsNull and invert the result.
switch (arch) {
            else => return self.fail(inst.base.src, "TODO call genIsNull and invert the result", .{}),
}
}
fn genBlock(self: *Function, inst: *ir.Inst.Block, comptime arch: std.Target.Cpu.Arch) !MCValue {
if (inst.base.ty.hasCodeGenBits()) {
return self.fail(inst.base.src, "TODO codegen Block with non-void type", .{});
}
// A block is nothing but a setup to be able to jump to the end.
defer inst.codegen.relocs.deinit(self.gpa);
try self.genBody(inst.args.body, arch);
for (inst.codegen.relocs.items) |reloc| try self.performReloc(inst.base.src, reloc);
return MCValue.none;
}
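    /// Back-patches a previously reserved rel32 slot. `pos` is the byte index
    /// of the 4-byte displacement field; the displacement is measured from the
    /// end of that field (pos + 4) to the current end of the code, so e.g.
    /// jumping over a 10-byte body stores 10.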
fn performReloc(self: *Function, src: usize, reloc: Reloc) !void {
switch (reloc) {
.rel32 => |pos| {
const amt = self.code.items.len - (pos + 4);
const s32_amt = std.math.cast(i32, amt) catch
return self.fail(src, "unable to perform relocation: jump too far", .{});
mem.writeIntLittle(i32, self.code.items[pos..][0..4], s32_amt);
},
}
}
fn genBr(self: *Function, inst: *ir.Inst.Br, comptime arch: std.Target.Cpu.Arch) !MCValue {
if (!inst.args.operand.ty.hasCodeGenBits())
return self.brVoid(inst.base.src, inst.args.block, arch);
const operand = try self.resolveInst(inst.args.operand);
switch (arch) {
else => return self.fail(inst.base.src, "TODO implement br for {}", .{self.target.cpu.arch}),
}
}
fn genBrVoid(self: *Function, inst: *ir.Inst.BrVoid, comptime arch: std.Target.Cpu.Arch) !MCValue {
return self.brVoid(inst.base.src, inst.args.block, arch);
}
fn brVoid(self: *Function, src: usize, block: *ir.Inst.Block, comptime arch: std.Target.Cpu.Arch) !MCValue {
// Emit a jump with a relocation. It will be patched up after the block ends.
try block.codegen.relocs.ensureCapacity(self.gpa, block.codegen.relocs.items.len + 1);
switch (arch) {
.i386, .x86_64 => {
// TODO optimization opportunity: figure out when we can emit this as a 2 byte instruction
// which is available if the jump is 127 bytes or less forward.
try self.code.resize(self.code.items.len + 5);
self.code.items[self.code.items.len - 5] = 0xe9; // jmp rel32
// Leave the jump offset undefined
block.codegen.relocs.appendAssumeCapacity(.{ .rel32 = self.code.items.len - 4 });
},
else => return self.fail(src, "TODO implement brvoid for {}", .{self.target.cpu.arch}),
}
return .none;
}
fn genAsm(self: *Function, inst: *ir.Inst.Assembly, comptime arch: Target.Cpu.Arch) !MCValue {
if (!inst.args.is_volatile and inst.base.isUnused())
return MCValue.dead;
if (arch != .x86_64 and arch != .i386) {
return self.fail(inst.base.src, "TODO implement inline asm support for more architectures", .{});
}
for (inst.args.inputs) |input, i| {
if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') {
return self.fail(inst.base.src, "unrecognized asm input constraint: '{}'", .{input});
}
const reg_name = input[1 .. input.len - 1];
const reg = parseRegName(arch, reg_name) orelse
return self.fail(inst.base.src, "unrecognized register: '{}'", .{reg_name});
const arg = try self.resolveInst(inst.args.args[i]);
try self.genSetReg(inst.base.src, arch, reg, arg);
}
if (mem.eql(u8, inst.args.asm_source, "syscall")) {
try self.code.appendSlice(&[_]u8{ 0x0f, 0x05 });
} else {
return self.fail(inst.base.src, "TODO implement support for more x86 assembly instructions", .{});
}
if (inst.args.output) |output| {
if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
return self.fail(inst.base.src, "unrecognized asm output constraint: '{}'", .{output});
}
const reg_name = output[2 .. output.len - 1];
const reg = parseRegName(arch, reg_name) orelse
return self.fail(inst.base.src, "unrecognized register: '{}'", .{reg_name});
return MCValue{ .register = @enumToInt(reg) };
} else {
return MCValue.none;
}
}
/// Encodes a REX prefix as specified, and appends it to the instruction
/// stream. This only modifies the instruction stream if at least one bit
/// is set true, which has a few implications:
///
/// * The length of the instruction buffer will be modified *if* the
/// resulting REX is meaningful, but will remain the same if it is not.
/// * Deliberately inserting a "meaningless REX" requires explicit usage of
/// 0x40, and cannot be done via this function.
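    ///
    /// For example, `self.rex(.{ .w = true, .b = true })` appends 0x49
    /// (0b0100_1001), while `self.rex(.{})` appends nothing at all.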
fn rex(self: *Function, arg: struct { b: bool = false, w: bool = false, x: bool = false, r: bool = false }) void {
// From section 2.2.1.2 of the manual, REX is encoded as b0100WRXB.
var value: u8 = 0x40;
if (arg.b) {
value |= 0x1;
}
if (arg.x) {
value |= 0x2;
}
if (arg.r) {
value |= 0x4;
}
if (arg.w) {
value |= 0x8;
}
if (value != 0x40) {
self.code.appendAssumeCapacity(value);
}
}
fn genSetReg(self: *Function, src: usize, comptime arch: Target.Cpu.Arch, reg: Reg(arch), mcv: MCValue) error{ CodegenFail, OutOfMemory }!void {
switch (arch) {
.x86_64 => switch (mcv) {
.dead => unreachable,
.none => unreachable,
.unreach => unreachable,
.compare_flags_unsigned => |op| {
try self.code.ensureCapacity(self.code.items.len + 3);
self.rex(.{ .b = reg.isExtended(), .w = reg.size() == 64 });
const opcode: u8 = switch (op) {
.gte => 0x93,
.gt => 0x97,
.neq => 0x95,
.lt => 0x92,
.lte => 0x96,
.eq => 0x94,
};
const id = @as(u8, reg.id() & 0b111);
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode, 0xC0 | id });
},
.compare_flags_signed => |op| {
return self.fail(src, "TODO set register with compare flags value (signed)", .{});
},
.immediate => |x| {
if (reg.size() != 64) {
return self.fail(src, "TODO decide whether to implement non-64-bit loads", .{});
}
// 32-bit moves zero-extend to 64-bit, so xoring the 32-bit
// register is the fastest way to zero a register.
if (x == 0) {
// The encoding for `xor r32, r32` is `0x31 /r`.
// Section 3.1.1.1 of the Intel x64 Manual states that "/r indicates that the
// ModR/M byte of the instruction contains a register operand and an r/m operand."
//
// R/M bytes are composed of two bits for the mode, then three bits for the register,
// then three bits for the operand. Since we're zeroing a register, the two three-bit
// values will be identical, and the mode is three (the raw register value).
//
// If we're accessing e.g. r8d, we need to use a REX prefix before the actual operation. Since
// this is a 32-bit operation, the W flag is set to zero. X is also zero, as we're not using a SIB.
// Both R and B are set, as we're extending, in effect, the register bits *and* the operand.
try self.code.ensureCapacity(self.code.items.len + 3);
self.rex(.{ .r = reg.isExtended(), .b = reg.isExtended() });
const id = @as(u8, reg.id() & 0b111);
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x31, 0xC0 | id << 3 | id });
return;
}
if (x <= std.math.maxInt(u32)) {
// Next best case: if we set the lower four bytes, the upper four will be zeroed.
//
// The encoding for `mov IMM32 -> REG` is (0xB8 + R) IMM.
if (reg.isExtended()) {
// Just as with XORing, we need a REX prefix. This time though, we only
// need the B bit set, as we're extending the opcode's register field,
// and there is no Mod R/M byte.
//
// Thus, we need b01000001, or 0x41.
try self.code.resize(self.code.items.len + 6);
self.code.items[self.code.items.len - 6] = 0x41;
} else {
try self.code.resize(self.code.items.len + 5);
}
self.code.items[self.code.items.len - 5] = 0xB8 | @as(u8, reg.id() & 0b111);
const imm_ptr = self.code.items[self.code.items.len - 4 ..][0..4];
mem.writeIntLittle(u32, imm_ptr, @intCast(u32, x));
return;
}
// Worst case: we need to load the 64-bit register with the IMM. GNU's assemblers calls
// this `movabs`, though this is officially just a different variant of the plain `mov`
// instruction.
//
// This encoding is, in fact, the *same* as the one used for 32-bit loads. The only
// difference is that we set REX.W before the instruction, which extends the load to
// 64-bit and uses the full bit-width of the register.
//
// Since we always need a REX here, let's just check if we also need to set REX.B.
//
// In this case, the encoding of the REX byte is 0b0100100B
try self.code.ensureCapacity(self.code.items.len + 10);
self.rex(.{ .w = true, .b = reg.isExtended() });
self.code.items.len += 9;
self.code.items[self.code.items.len - 9] = 0xB8 | @as(u8, reg.id() & 0b111);
const imm_ptr = self.code.items[self.code.items.len - 8 ..][0..8];
mem.writeIntLittle(u64, imm_ptr, x);
},
.embedded_in_code => |code_offset| {
if (reg.size() != 64) {
return self.fail(src, "TODO decide whether to implement non-64-bit loads", .{});
}
// We need the offset from RIP in a signed i32 twos complement.
// The instruction is 7 bytes long and RIP points to the next instruction.
try self.code.ensureCapacity(self.code.items.len + 7);
// 64-bit LEA is encoded as REX.W 8D /r. If the register is extended, the REX byte is modified,
// but the operation size is unchanged. Since we're using a disp32, we want mode 0 and lower three
// bits as five.
// REX 0x8D 0b00RRR101, where RRR is the lower three bits of the id.
self.rex(.{ .w = true, .b = reg.isExtended() });
self.code.items.len += 6;
const rip = self.code.items.len;
const big_offset = @intCast(i64, code_offset) - @intCast(i64, rip);
const offset = @intCast(i32, big_offset);
self.code.items[self.code.items.len - 6] = 0x8D;
self.code.items[self.code.items.len - 5] = 0b101 | (@as(u8, reg.id() & 0b111) << 3);
const imm_ptr = self.code.items[self.code.items.len - 4 ..][0..4];
mem.writeIntLittle(i32, imm_ptr, offset);
},
.register => |r| {
if (reg.size() != 64) {
return self.fail(src, "TODO decide whether to implement non-64-bit loads", .{});
}
const src_reg = @intToEnum(Reg(arch), @intCast(u8, r));
// This is a variant of 8B /r. Since we're using 64-bit moves, we require a REX.
// This is thus three bytes: REX 0x8B R/M.
// If the destination is extended, the R field must be 1.
// If the *source* is extended, the B field must be 1.
// Since the register is being accessed directly, the R/M mode is three. The reg field (the middle
// three bits) contain the destination, and the R/M field (the lower three bits) contain the source.
try self.code.ensureCapacity(self.code.items.len + 3);
self.rex(.{ .w = true, .r = reg.isExtended(), .b = src_reg.isExtended() });
const R = 0xC0 | (@as(u8, reg.id() & 0b111) << 3) | @as(u8, src_reg.id() & 0b111);
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x8B, R });
},
.memory => |x| {
if (reg.size() != 64) {
return self.fail(src, "TODO decide whether to implement non-64-bit loads", .{});
}
if (x <= std.math.maxInt(u32)) {
// Moving from memory to a register is a variant of `8B /r`.
// Since we're using 64-bit moves, we require a REX.
// This variant also requires a SIB, as it would otherwise be RIP-relative.
// We want mode zero with the lower three bits set to four to indicate an SIB with no other displacement.
// The SIB must be 0x25, to indicate a disp32 with no scaled index.
// 0b00RRR100, where RRR is the lower three bits of the register ID.
// The instruction is thus eight bytes; REX 0x8B 0b00RRR100 0x25 followed by a four-byte disp32.
try self.code.ensureCapacity(self.code.items.len + 8);
self.rex(.{ .w = true, .b = reg.isExtended() });
self.code.appendSliceAssumeCapacity(&[_]u8{
0x8B,
0x04 | (@as(u8, reg.id() & 0b111) << 3), // R
0x25,
});
mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), @intCast(u32, x));
} else {
// If this is RAX, we can use a direct load; otherwise, we need to load the address, then indirectly load
// the value.
if (reg.id() == 0) {
// REX.W 0xA1 moffs64*
// moffs64* is a 64-bit offset "relative to segment base", which really just means the
// absolute address for all practical purposes.
try self.code.resize(self.code.items.len + 10);
// REX.W == 0x48
self.code.items[self.code.items.len - 10] = 0x48;
self.code.items[self.code.items.len - 9] = 0xA1;
const imm_ptr = self.code.items[self.code.items.len - 8 ..][0..8];
mem.writeIntLittle(u64, imm_ptr, x);
} else {
// This requires two instructions; a move imm as used above, followed by an indirect load using the register
// as the address and the register as the destination.
//
// This cannot be used if the lower three bits of the id are equal to four or five, as there
// is no way to possibly encode it. This means that RSP, RBP, R12, and R13 cannot be used with
// this instruction.
const id3 = @truncate(u3, reg.id());
std.debug.assert(id3 != 4 and id3 != 5);
// Rather than duplicate the logic used for the move, we just use a self-call with a new MCValue.
try self.genSetReg(src, arch, reg, MCValue{ .immediate = x });
// Now, the register contains the address of the value to load into it
// Currently, we're only allowing 64-bit registers, so we need the `REX.W 8B /r` variant.
// TODO: determine whether to allow other sized registers, and if so, handle them properly.
// This operation requires three bytes: REX 0x8B R/M
try self.code.ensureCapacity(self.code.items.len + 3);
// For this operation, we want R/M mode *zero* (use register indirectly), and the two register
// values must match. Thus, it's 00ABCABC where ABC is the lower three bits of the register ID.
//
// Furthermore, if this is an extended register, both B and R must be set in the REX byte, as *both*
// register operands need to be marked as extended.
self.rex(.{ .w = true, .b = reg.isExtended(), .r = reg.isExtended() });
const RM = (@as(u8, reg.id() & 0b111) << 3) | @truncate(u3, reg.id());
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x8B, RM });
}
}
},
.stack_offset => |off| {
return self.fail(src, "TODO implement genSetReg for stack variables", .{});
},
},
else => return self.fail(src, "TODO implement genSetReg for more architectures", .{}),
}
}
fn genPtrToInt(self: *Function, inst: *ir.Inst.PtrToInt) !MCValue {
// no-op
return self.resolveInst(inst.args.ptr);
}
fn genBitCast(self: *Function, inst: *ir.Inst.BitCast) !MCValue {
const operand = try self.resolveInst(inst.args.operand);
return operand;
}
fn resolveInst(self: *Function, inst: *ir.Inst) !MCValue {
// Constants have static lifetimes, so they are always memoized in the outer most table.
if (inst.cast(ir.Inst.Constant)) |const_inst| {
const branch = &self.branch_stack.items[0];
const gop = try branch.inst_table.getOrPut(self.gpa, inst);
if (!gop.found_existing) {
gop.entry.value = try self.genTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val });
}
return gop.entry.value;
}
// Treat each stack item as a "layer" on top of the previous one.
var i: usize = self.branch_stack.items.len;
while (true) {
i -= 1;
if (self.branch_stack.items[i].inst_table.get(inst)) |mcv| {
return mcv;
}
}
}
fn copyToNewRegister(self: *Function, inst: *ir.Inst) !MCValue {
return self.fail(inst.src, "TODO implement copyToNewRegister", .{});
}
/// If the MCValue is an immediate, and it does not fit within this type,
/// we put it in a register.
/// A potential opportunity for future optimization here would be keeping track
/// of the fact that the instruction is available both as an immediate
/// and as a register.
fn limitImmediateType(self: *Function, inst: *ir.Inst, comptime T: type) !MCValue {
const mcv = try self.resolveInst(inst);
const ti = @typeInfo(T).Int;
switch (mcv) {
.immediate => |imm| {
// This immediate is unsigned.
const U = @Type(.{
.Int = .{
.bits = ti.bits - @boolToInt(ti.is_signed),
.is_signed = false,
},
});
if (imm >= std.math.maxInt(U)) {
return self.copyToNewRegister(inst);
}
},
else => {},
}
return mcv;
}
fn genTypedValue(self: *Function, src: usize, typed_value: TypedValue) !MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
switch (typed_value.ty.zigTypeTag()) {
.Pointer => {
if (typed_value.val.cast(Value.Payload.DeclRef)) |payload| {
const got = &self.bin_file.program_headers.items[self.bin_file.phdr_got_index.?];
const decl = payload.decl;
const got_addr = got.p_vaddr + decl.link.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
}
return self.fail(src, "TODO codegen more kinds of const pointers", .{});
},
.Int => {
const info = typed_value.ty.intInfo(self.target.*);
if (info.bits > ptr_bits or info.signed) {
return self.fail(src, "TODO const int bigger than ptr and signed int", .{});
}
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
},
.Bool => {
return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
},
.ComptimeInt => unreachable, // semantic analysis prevents this
.ComptimeFloat => unreachable, // semantic analysis prevents this
else => return self.fail(src, "TODO implement const of type '{}'", .{typed_value.ty}),
}
}
fn resolveParameters(
self: *Function,
src: usize,
cc: std.builtin.CallingConvention,
param_types: []const Type,
results: []MCValue,
) !u32 {
switch (self.target.cpu.arch) {
.x86_64 => {
switch (cc) {
.Naked => {
assert(results.len == 0);
return 0;
},
.Unspecified, .C => {
var next_int_reg: usize = 0;
var next_stack_offset: u32 = 0;
const integer_registers = [_]Reg(.x86_64){ .rdi, .rsi, .rdx, .rcx, .r8, .r9 };
for (param_types) |ty, i| {
switch (ty.zigTypeTag()) {
.Bool, .Int => {
if (next_int_reg >= integer_registers.len) {
results[i] = .{ .stack_offset = next_stack_offset };
next_stack_offset += @intCast(u32, ty.abiSize(self.target.*));
} else {
results[i] = .{ .register = @enumToInt(integer_registers[next_int_reg]) };
next_int_reg += 1;
}
},
else => return self.fail(src, "TODO implement function parameters of type {}", .{@tagName(ty.zigTypeTag())}),
}
}
return next_stack_offset;
},
else => return self.fail(src, "TODO implement function parameters for {}", .{cc}),
}
},
else => return self.fail(src, "TODO implement C ABI support for {}", .{self.target.cpu.arch}),
}
}
fn fail(self: *Function, src: usize, comptime format: []const u8, args: anytype) error{ CodegenFail, OutOfMemory } {
@setCold(true);
assert(self.err_msg == null);
self.err_msg = try ErrorMsg.create(self.bin_file.allocator, src, format, args);
return error.CodegenFail;
}
};
const x86_64 = @import("codegen/x86_64.zig");
const x86 = @import("codegen/x86.zig");
fn Reg(comptime arch: Target.Cpu.Arch) type {
return switch (arch) {
.i386 => x86.Register,
.x86_64 => x86_64.Register,
else => @compileError("TODO add more register enums"),
};
}
fn parseRegName(comptime arch: Target.Cpu.Arch, name: []const u8) ?Reg(arch) {
return std.meta.stringToEnum(Reg(arch), name);
} | src-self-hosted/codegen.zig |
const std = @import("std");
const UdpPacket = @This();
pub const Port = u16;
source_port: Port,
destination_port: Port,
checksum: u16,
payload: []u8,
// TODO: Handle Jumbograms properly
pub fn getLength(packet: UdpPacket) u16 {
return if (packet.payload.len + 8 > 65_535)
0
else
8 + @intCast(u16, packet.payload.len);
}
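/// Writes the 8-byte header from RFC 768 followed by the payload. The four
/// header fields (source port, destination port, length of header plus
/// payload, and checksum) are each written as a big-endian u16.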
pub fn encode(packet: UdpPacket, writer: anytype) !void {
var buffer: [8]u8 = undefined;
    // Bind the stream to a local first; taking .writer() of a temporary
    // captures a pointer to a value that does not outlive the statement.
    var fbs = std.io.fixedBufferStream(&buffer);
    var fbs_writer = fbs.writer();
try fbs_writer.writeIntBig(Port, packet.source_port);
try fbs_writer.writeIntBig(Port, packet.destination_port);
try fbs_writer.writeIntBig(u16, packet.getLength());
try fbs_writer.writeIntBig(u16, packet.checksum); // TODO: Calculate checksum!
try writer.writeAll(&buffer);
try writer.writeAll(packet.payload);
}
pub fn decode(allocator: *std.mem.Allocator, data: []const u8) !UdpPacket {
if (data.len < 8)
return error.TooShort;
    var fbs = std.io.fixedBufferStream(data);
    var reader = fbs.reader();
var packet: UdpPacket = undefined;
packet.source_port = try reader.readIntBig(Port);
packet.destination_port = try reader.readIntBig(Port);
    const length = try reader.readIntBig(u16);
packet.checksum = try reader.readIntBig(u16);
packet.payload = try allocator.dupe(u8, if (length == 0)
data[8..]
else if (length >= 8)
data[8..length]
else
return error.TooShort);
return packet;
}
pub fn deinit(self: *UdpPacket, allocator: *std.mem.Allocator) void {
allocator.free(self.payload);
}
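// A small example added as a sanity check: getLength reports the 8-byte
// header plus the payload size for ordinary (non-jumbogram) packets.
test "getLength is the 8-byte header plus the payload" {
    var payload = [_]u8{ 1, 2, 3, 4 };
    const packet = UdpPacket{
        .source_port = 53,
        .destination_port = 53,
        .checksum = 0,
        .payload = &payload,
    };
    try std.testing.expectEqual(@as(u16, 12), packet.getLength());
}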
test "UDP packet encode / decode (xkcd.com DNS answer)" {
const raw_packet = @embedFile("test-data/udp/xkcd.com.bin");
var packet = try decode(std.testing.allocator, raw_packet);
defer packet.deinit(std.testing.allocator);
try std.testing.expectEqual(@as(Port, 53), packet.source_port);
try std.testing.expectEqual(@as(Port, 57452), packet.destination_port);
try std.testing.expectEqual(@as(u16, 146), packet.getLength());
try std.testing.expectEqual(@as(u16, 0x4eb1), packet.checksum);
    // zig fmt: off
const expected_payload = [_]u8{
0x00, 0x03, 0x81, 0x80, 0x00, 0x01, 0x00, 0x04,
0x00, 0x00, 0x00, 0x00, 0x04, 0x78, 0x6b, 0x63,
0x64, 0x03, 0x63, 0x6f, 0x6d, 0x00, 0x00, 0x1c,
0x00, 0x01, 0xc0, 0x0c, 0x00, 0x1c, 0x00, 0x01,
0x00, 0x00, 0x0e, 0x09, 0x00, 0x10, 0x2a, 0x04,
0x4e, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0xc0, 0x0c,
0x00, 0x1c, 0x00, 0x01, 0x00, 0x00, 0x0e, 0x09,
0x00, 0x10, 0x2a, 0x04, 0x4e, 0x42, 0x02, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x67, 0xc0, 0x0c, 0x00, 0x1c, 0x00, 0x01,
0x00, 0x00, 0x0e, 0x09, 0x00, 0x10, 0x2a, 0x04,
0x4e, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0xc0, 0x0c,
0x00, 0x1c, 0x00, 0x01, 0x00, 0x00, 0x0e, 0x09,
0x00, 0x10, 0x2a, 0x04, 0x4e, 0x42, 0x06, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x67,
};
    // zig fmt: on
try std.testing.expectEqualSlices(u8, &expected_payload, packet.payload);
var buffer = std.ArrayList(u8).init(std.testing.allocator);
defer buffer.deinit();
try packet.encode(buffer.writer());
try std.testing.expectEqualSlices(u8, raw_packet, buffer.items);
} | src/UdpPacket.zig |
const std = @import("std");
const sdl = @import("sdl.zig");
pub const Audio = struct {
device: ?sdl.AudioDevice,
lastCallbackTime: u64,
// We'll allow up to 16 sounds to be played simultaneously
sounds: [16]?Sound,
next_sound_index: u8,
cursor: u64, // current play cursor in the "global" timeline
buffer_copy: [2 * 2048]i16 = [_]i16{0} ** (2 * 2048),
pub const Sound = struct {
offset: u64,
data: []const i16,
volume: u7,
};
pub fn init() Audio {
return Audio{
.device = null,
.lastCallbackTime = sdl.getPerformanceCounter(),
            // Empty slots must be null, not undefined; the callback unwraps
            // these optionals.
            .sounds = [_]?Sound{null} ** 16,
.cursor = 0,
.next_sound_index = 0,
};
}
pub fn open(self: *Audio) !void {
self.device = try sdl.AudioDevice.init(
null,
false,
std.mem.zeroInit(sdl.SDL_AudioSpec, .{
.freq = 48000,
.format = sdl.AUDIO_S16LSB,
.channels = 2,
.samples = 2048,
.callback = struct {
export fn callback(userdata: ?*c_void, data: [*c]u8, len: i32) void {
var audio = @ptrCast(*Audio, @alignCast(@alignOf(*Audio), userdata));
var buffer = @ptrCast([*c]i16, @alignCast(@alignOf(i16), data))[0 .. @intCast(usize, len) / 2];
audio.callback(buffer);
}
}.callback,
.userdata = @ptrCast(?*c_void, self),
}),
0,
);
}
pub fn deinit(self: *Audio) void {
if (self.device) |device| {
device.deinit();
}
}
pub fn start(self: *Audio) void {
if (self.device) |device| {
device.play();
}
}
pub fn stop(self: *Audio) void {
if (self.device) |device| {
device.pause();
}
}
pub fn play(self: *Audio, sound: Sound) void {
self.sounds[self.next_sound_index] = Sound{
.offset = sound.offset + self.cursor,
.data = sound.data,
.volume = sound.volume,
};
self.next_sound_index = (self.next_sound_index + 1) % 16;
}
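    // Mixing model: `cursor` is the absolute play position (in samples) on the
    // global timeline, and each Sound's `offset` is the absolute sample index
    // at which it starts. Each callback mixes the overlap between the device
    // window [cursor, cursor + buffer.len) and every active sound's window
    // [offset, offset + data.len), clamping the summed samples to the i16 range.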
fn callback(self: *Audio, buffer: []i16) void {
std.debug.assert(self.device != null);
var newTime = sdl.getPerformanceCounter();
self.lastCallbackTime = newTime;
std.mem.set(i16, buffer, 0);
for (self.sounds) |maybe_sound, i| {
if (maybe_sound) |sound| {
if (self.cursor + buffer.len <= sound.offset) {
continue;
}
// Figure out where in the sound we should start copying
var soundBeginCursor = self.cursor - std.math.min(sound.offset, self.cursor);
// Sound is finished
if (soundBeginCursor >= sound.data.len) {
                    self.sounds[i] = null;
continue;
}
var bufferStartOffset: usize = 0;
// Sound should begin in the middle of current buffer
if (sound.offset > self.cursor) {
bufferStartOffset = sound.offset - self.cursor;
}
var copyLen = buffer.len - bufferStartOffset;
var soundEndCursor = soundBeginCursor + std.math.min(sound.data.len - soundBeginCursor, copyLen);
// sdl mixing
// sdl.mixAudio(i16, buffer[bufferStartOffset..], sound.data[soundBeginCursor..soundEndCursor], sdl.AUDIO_S16LSB, sound.volume);
//
// no mixing
// std.mem.copy(i16, buffer[bufferStartOffset..], sound.data[soundBeginCursor..soundEndCursor]);
// Custom mixing (just adding with clipping)
var bufferSlice = buffer[bufferStartOffset..];
var soundSlice = sound.data[soundBeginCursor..soundEndCursor];
var j: usize = 0;
while (j < soundSlice.len) : (j += 1) {
var result: i16 = undefined;
                if (@addWithOverflow(i16, bufferSlice[j], soundSlice[j], &result)) {
                    if (result > 0) {
                        // Wrapped to a positive value: the true sum underflowed
                        // below minInt, so clamp to the bottom of the range.
                        bufferSlice[j] = std.math.minInt(i16);
                    } else {
                        // Wrapped to a negative value: the true sum overflowed
                        // above maxInt, so clamp to the top of the range.
                        bufferSlice[j] = std.math.maxInt(i16);
                    }
                } else {
                    bufferSlice[j] = result;
                }
}
}
}
self.cursor += buffer.len;
        std.mem.copy(i16, self.buffer_copy[0..], buffer);
}
}; | src/audio.zig |
const std = @import("std");
const debug = std.debug;
pub fn main() void {
debug.warn("02-2: ");
common_chars(input_02);
}
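// Day 2 part 2: find the two box IDs whose strings differ at exactly one
// position (Hamming distance 1) and print the characters they share.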
fn common_chars(ids: [] const [] const u8) void {
for (ids) |id, i| {
        outer: for (ids[i + 1..]) |id2| {
            debug.assert(id.len == id2.len);
            var distance: u32 = 0;
            for (id) |char, k| {
                if (char != id2[k]) {
                    distance += 1;
                }
                if (distance > 1) {
                    continue :outer;
                }
            }
            if (distance == 1) {
                //debug.warn("{}\n", id);
                //debug.warn("{}\n", id2);
                for (id) |char, x| {
                    if (char == id2[x]) {
                        debug.warn("{c}", char);
                    }
                }
                debug.warn("\n");
                break;
            }
        }
}
}
test "common chars" {
const ids = [] const [] const u8 {
"abcde",
"fghij",
"klmno",
"pqrst",
"fguij",
"axcye",
"wvxyz",
};
common_chars(ids);
}
const input_02 = [] const [] const u8 {
"luojygedpvsthptkxiwnaorzmq",
"lucjqgedppsbhftkxiwnaorlmq",
"lucjmgefpvsbhftkxiwnaorziq",
"lucjvgedpvsbxftkxiwpaorzmq",
"lrcjygedjvmbhftkxiwnaorzmq",
"lucjygedpvsbhftkxiwnootzmu",
"eucjygedpvsbhftbxiwnaorzfq",
"lulnygedpvsbhftkxrwnaorzmq",
"lucsygedpvsohftkxqwnaorzmq",
"lucjyaedpvsnhftkxiwnaorzyq",
"lunjygedpvsohftkxiwnaorzmb",
"lucjxgedpvsbhrtkxiwnamrzmq",
"lucjygevpvsbhftkxcwnaorzma",
"lucjbgedpvsbhftrxiwnaoazmq",
"llcjygkdpvhbhftkxiwnaorzmq",
"lmcjygxdpvsbhftkxswnaorzmq",
"lucpygedpvsbhftkxiwraorzmc",
"lucjbgrdpvsblftkxiwnaorzmq",
"lucjfgedpvsbhftkxiwnaurzmv",
"lucjygenpvsbhytkxiwnaorgmq",
"luqjyredsvsbhftkxiwnaorzmq",
"lucjygedpvavhftkxiwnaorumq",
"gucjygedpvsbhkxkxiwnaorzmq",
"lucjygedpvsbhftkxlwnaordcq",
"lucjygedpvibhfqkxiwnaorzmm",
"lucjegedpvsbaftkxewnaorzmq",
"kucjygeqpvsbhfokxiwnaorzmq",
"lugjygedwvsbhftkxiwnatrzmq",
"lucjygedqvsbhftdxiwnayrzmq",
"lucjygekpvsbuftkxiwnaqrzmq",
"lucjygedpvsbhfbkxiwnaoozdq",
"lscjygedpvzchftkxiwnaorzmq",
"luckygedpvsbxftkxiwnaorvmq",
"luyjygedgvsbhptkxiwnaorzmq",
"lmcjygedpvsbhfckxiwnaodzmq",
"lucmygedwvybhftkxiwnaorzmq",
"lgcjhgedavsbhftkxiwnaorzmq",
"lucjugedpvsbhftkxiwmaoozmq",
"lucjygedpvybhftkxkwnaorumq",
"lucjygedpvzbhfakxiwnaorzpq",
"lucjygedpvsbhftyxzwnajrzmq",
"lucjygedpvsdhfakxiwnoorzmq",
"luyjygeopvhbhftkxiwnaorzmq",
"lucjygadpvsbhntkxiwnaorzmx",
"lucjygedzvsbhftkiiwuaorzmq",
"sucjygodpvsbhftkxiwuaorzmq",
"euijygydpvsbhftkxiwnaorzmq",
"lucjlgeduvsbhftkxicnaorzmq",
"lucjdgedpvsbhfgkxiwnhorzmq",
"lucjymedpvsbhotkxiqnaorzmq",
"lucjygmdpvsbhftkxywnairzmq",
"lucjggedpvsbhfxkxiqnaorzmq",
"sucjygedpvsbhftkxiwnaorjmv",
"lucjlgedpvsbhftkxiwnairzmg",
"lucjygedppubhftkxijnaorzmq",
"lucjyxedpvsvhftkxlwnaorzmq",
"lucjygedpvxbhftkfiwyaorzmq",
"lucjygedposbhftkniwnaorzmw",
"lucjygewpvsbhftgxiwnavrzmq",
"lucjynedpvsbmftkaiwnaorzmq",
"lucjyhedpvzbhftkxiwncorzmq",
"lucjygedpvsbhfikpiwnaoezmq",
"lupjypedpvsbhftkjiwnaorzmq",
"lucjygudpvsbhfwkxivnaorzmq",
"lucjygrdpvsbhatkxzwnaorzmq",
"lucjbgmdpvsbhftkxihnaorzmq",
"lucjmgedpvpbhftkxiwnaorcmq",
"lucjygedpvskhfukmiwnaorzmq",
"lucjygedgvsbhftkxiwnvprzmq",
"lucjzgedppsbhytkxiwnaorzmq",
"lfcjypedpvsbhftrxiwnaorzmq",
"lucjyqldphsbhftkxiwnaorzmq",
"lucjygedpvsbhftzxewnaorzqq",
"lucjygeapvsbhftkxiinoorzmq",
"lucjygedpvszhftguiwnaorzmq",
"luojygedpvsbhftkxawnaornmq",
"lucjygedpcsboetkxiwnaorzmq",
"lufjygedpvfbhftaxiwnaorzmq",
"luciygedpvsbhftkxhwaaorzmq",
"lucjygedpvnbhftkaiwnaorzmc",
"lucjygedpvsbhftkxiwcaorbdq",
"lucjygelpvsbhftaxiwsaorzmq",
"lujjygedpssbhftkxiwnaorzmr",
"ludjygedpvsbhftkxiynaorzmj",
"lukjygeedvsbhftkxiwnaorzmq",
"lucjqpedpvsbhftkxiwnaozzmq",
"jucjygedpvsbhftkxgwnaorqmq",
"llwjygedpvsbhetkxiwnaorzmq",
"rucjygedpvsbhftkxiwndorymq",
"lucjygedpvsbhftvxswnaorwmq",
"lucjygerpvsbhfykxiwnaormmq",
"lucjynedpvsbhftkxijnaorziq",
"ljcjygedpvrbhftkeiwnaorzmq",
"lucjygedpnsbhftkxiwhaornmq",
"lucjygadpvsbhftkxibnaorzqq",
"lucjqgedpvsihftkxiwnaorzdq",
"lucjygedpvsqhfttjiwnaorzmq",
"llcjygedsvsbhftkxiwwaorzmq",
"lfckygedpvsbhftkxiunaorzmq",
"lucjyeedpdsbhftkxiwnaotzmq",
"lucjygedpvsbhftkoiwnaoqzcq",
"huwjvgedpvsbhftkxiwnaorzmq",
"lucjygldpvsbdhtkxiwnaorzmq",
"lycxygedpvsbhftmxiwnaorzmq",
"lucjygedpvsbhftyxianvorzmq",
"lucuygedpdsbhqtkxiwnaorzmq",
"lucjyggdpvsbhftkxiwnavremq",
"lucjyggdpvsbkftkxiwnaorbmq",
"luchyqedpvsbhftixiwnaorzmq",
"lpcnygedpvsbhftkxzwnaorzmq",
"lucjygedpvsihftkxiwfaortmq",
"lucjygvdpvsbhgtkxiwnamrzmq",
"lucjygodpvrbhqtkxiwnaorzmq",
"lucjygedpfsbhftkxipnaorzma",
"lucjygedpvsbhftkxpcjaorzmq",
"lucjygodbmsbhftkxiwnaorzmq",
"<KEY>",
"luxjygjdpvsbhltkxiwnaorzmq",
"lucxygedpvsbhftkxzwnaorjmq",
"luajygedpvsbhftzxiwaaorzmq",
"lhcjygedpvsqhftfxiwnaorzmq",
"lucjygecphsbhftkxiwnaprzmq",
"lucjygedpvsbhptkxifnaorqmq",
"lucjygedpvichftkpiwnaorzmq",
"lucjygedpcsbhstkxswnaorzmq",
"kucjygedpvsbhftkxiwbyorzmq",
"lfpjxgedpvsbhftkxiwnaorzmq",
"lucjytldpvsbhftkxiwdaorzmq",
"lufjygedpvfbhftbxiwnaorzmq",
"lucjygebpvgbhftkxipnaorzmq",
"luujygedpvdbhftkxiwnaorzmd",
"lucjygedpvsbhfbyxwwnaorzmq",
"lucjygedpvsbhftkxiwnaoqpmw",
"qucgygedpvsbhftkxiwnaortmq",
"ludjtgedpvsbhftkxiunaorzmq",
"lucjyiedovsbhftkxiwjaorzmq",
"lucjygedpysbjftoxiwnaorzmq",
"lumjygedpvsbuftkxiknaorzmq",
"lucjygedpvsbhfokxgonaorzmq",
"lucjygeqpvsbhftkfiwnaorzeq",
"lucjygedpvskhftkxiwntorkmq",
"luujygedpvsbhftkxiwraorzmt",
"<KEY>",
"jucjyfedcvsbhftkxiwnaorzmq",
"luujygedpnsehftkxiwnaorzmq",
"lucjygedpvszhfckxiwnaorzmi",
"lucjyredpvsbzftkpiwnaorzmq",
"<KEY>",
"<KEY>",
"<KEY>",
"vucjycedpvsbhftkxiwfaorzmq",
"luawygeapvsbhftkxiwnaorzmq",
"lucjygetpvsbhftkxiwnaafzmq",
"<KEY>",
"luolygedpvsbgftkxiwnaorzmq",
"likjygedpvsbhftkxiwnabrzmq",
"lucjygedovsbhftkxirpaorzmq",
"lucjygedphsshftkxqwnaorzmq",
"uuqjygewpvsbhftkxiwnaorzmq",
"lucjygedcvsbhftkxiwoarrzmq",
"<KEY>",
"<KEY>",
"lucjygedpvsblfxkxivnaorzmq",
"lucjygedpvsghftkxiwnaawzmq",
"yucjygedpgsbhftkxiwnaorzbq",
"lucjyweapvsbhftkxiwnaoezmq",
"lucjygevpvsbyftcxiwnaorzmq",
"luejygedovsbhftkxiwnqorzmq",
"lucjyqedpvsbhfbkxiwnaorzms",
"lucjypedpvsbhftwxiwnhorzmq",
"lucjygedpvsbhmtkviwxaorzmq",
"lucjogedpvpbhftkxiwnaorqmq",
"lucjygedpvsbhztkxkwnaoazmq",
"lucjyaedpvsbcftkxiwnaorzhq",
"lucjygbdpvkbhftkxiznaorzmq",
"lucpygedpvzbhftkxfwnaorzmq",
"lucjmgedpcsbhftkxiwnaoezmq",
"lucjygedyvsbbftkxiwnnorzmq",
"lucjyyedpvsbhftuxiwnaonzmq",
"<KEY>",
"uccjygedpvschftkxiwnaorzmq",
"lusjygedpvbbhqtkxiwnaorzmq",
"ducuygedpvsbhftkxiwnaorzyq",
"lucjygkdvwsbhftkxiwnaorzmq",
"cucjyyedpvsbhftkxiwnaerzmq",
"lucjygedavsbhftkxiwnkorzbq",
"lucjygedmvsyhftkxiwiaorzmq",
"lucjygeipvsbhfpkxiwnaorzpq",
"vucjugedvvsbhftkxiwnaorzmq",
"lucjyzedpvsbhftkxpwnaoozmq",
"lucjygedpvgbhftkxiwtaorzqq",
"lecjygedpvcwhftkxiwnaorzmq",
"lucjyghdpvsbhfcyxiwnaorzmq",
"lucjygedpvesqftkxiwnaorzmq",
"lucjyjehpvsbhftbxiwnaorzmq",
"lucjygedpvtbhdtkxignaorzmq",
"lucjygxdpgsbhftkxivnaorzmq",
"lucjygvdpvsbhftkpiwnaorzqq",
"lucjysedpvsbhftkxiwnalrzmc",
"lucjygedpvkbhjtkxiwnaorsmq",
"lucjygedpvsbvfgkxiwnaerzmq",
"lucjygedpvsihftkxilnaorzmu",
"lvcvygndpvsbhftkxiwnaorzmq",
"lucjysedpqsbhftkxiwnaordmq",
"lucsygeypvsbhftkwiwnaorzmq",
"lucjygewpotbhftkxiwnaorzmq",
"lucjysedpvsbhftkxiwnanrzmv",
"lucjygedpvsbhutkxiwnaoplmq",
"wucjygedpvsqbftkxiwnaorzmq",
"lacjygeepvsbhftkxiwnjorzmq",
"lucjygedpusyhftkxicnaorzmq",
"qucjyredpvsbhftkxiwnworzmq",
"lucjygedevsbhftkgiwnayrzmq",
"lucjygedpksbrftkliwnaorzmq",
"lucjygedpvsbhfgkxisnaorzeq",
"lucjygedpvhdhftkeiwnaorzmq",
"lucjsgedpvsboftkxiwnaorumq",
"luctygedpvsbhftouiwnaorzmq",
"lucjygedpvsjhfukjiwnaorzmq",
"lucjagrepvsbhftkxiwnaorzmq",
"lucjkgerpvsbhftkxiwnairzmq",
"turjygedpvsbnftkxiwnaorzmq",
"lbcjygedpvsbhftkdpwnaorzmq",
"lucpygedpvsbhftkxnwnoorzmq",
"jucjygedpvsbhbtkxicnaorzmq",
"lecjygedpvsbhftkriwnaogzmq",
"licjyvcdpvsbhftkxiwnaorzmq",
"lrcjygewpnsbhftkxiwnaorzmq",
"ltcxygedpvlbhftkxiwnaorzmq",
"luctygedpvhbhztkxiwnaorzmq",
"lucwygedplsbhfakxiwnaorzmq",
"lucjygedpnsbhftkxiwjaoezmq",
"lucpygedptsbhftkxiwnaorzmo",
"lucjygedpvibhqtkxiknaorzmq",
"lucjwgqdpvrbhftkxiwnaorzmq",
"lucjmgkdpvsbhftkxiwraorzmq",
"lucjygwupvsbhftkxiznaorzmq",
"lucjhgedpvobhftkxiwncorzmq",
"lucjygedpvsbhftkxiwnaohtmj",
"lucjygedpvsbeftkfiwnaorzyq",
"lucjygcdpvsbpftkhiwnaorzmq",
"lucjygedpmsbhftkxiwnkouzmq",
"oucjygedpvsbyftkximnaorzmq",
"lucjcgedpvsbhftkxywnforzmq",
"lfcjygedfvsbdftkxiwnaorzmq",
"ducjygedevsbhfttxiwnaorzmq",
"ldcjdgedpvsbhftkxiwnavrzmq",
"lucjymedmvsbhqtkxiwnaorzmq",
"lucjygedpvabhftkxiwnasrlmq",
"lucjygefpvsbhftkxmwnaorkmq",
}; | 2018/day_02_2.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const AutoHashMap = std.AutoHashMap;
const StringHashMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const Str = []const u8;
const util = @import("util.zig");
const gpa = util.gpa;
// const data = @embedFile("../data/test/day14.txt");
const data = @embedFile("../data/puzzle/day14.txt");
const Pair = struct {
name: []const u8,
idx: usize,
};
const Rule = struct {
parent: Pair,
left: Pair,
right: Pair,
};
fn getPair(pairs: *StringHashMap(Pair), word: []const u8) Pair {
if (!pairs.contains(word)) {
var idx = pairs.count();
var p = pairs.allocator.alloc(u8, word.len) catch unreachable;
std.mem.copy(u8, p, word);
pairs.put(p, .{ .name = p, .idx = idx }) catch unreachable;
}
return pairs.getPtr(word).?.*;
}
pub fn main() !void {
var pairs = StringHashMap(Pair).init(gpa);
var rules = AutoHashMap(usize, Rule).init(gpa);
var counts = ArrayList(u64).init(gpa);
var letter_counts: [128]u64 = std.mem.zeroes([128]u64);
    defer {
        rules.deinit();
        // Free the duplicated key strings; each Pair's `name` is the same
        // allocation that backs the map key.
        var it = pairs.valueIterator();
        while (it.next()) |p| gpa.free(p.name);
        pairs.deinit();
        counts.deinit();
    }
var lines = tokenize(data, "\r\n");
var startLine = lines.next().?;
while (lines.next()) |line| {
var words = split(line, " -> ");
const parent = getPair(&pairs, words.next().?[0..2]);
const dest = words.next().?[0];
var left_buf: [2:0]u8 = "AB".*;
left_buf[0] = parent.name[0];
left_buf[1] = dest;
var right_buf: [2:0]u8 = "AB".*;
right_buf[0] = dest;
right_buf[1] = parent.name[1];
const left: []const u8 = &left_buf;
const right: []const u8 = &right_buf;
var rule = Rule{
.parent = parent,
.left = getPair(&pairs, left),
.right = getPair(&pairs, right),
};
rules.put(parent.idx, rule) catch unreachable;
}
{
var idx: usize = 0;
while (idx < rules.count()) : (idx += 1) {
const rule = rules.getPtr(idx).?.*;
print("{s}({: >2}) -> {s} {s}\n", .{ rule.parent.name, rule.parent.idx, rule.left.name, rule.right.name });
counts.append(0) catch unreachable;
}
}
{
var idx: usize = 0;
while (idx < startLine.len - 1) : (idx += 1) {
const p = getPair(&pairs, startLine[idx .. idx + 2]);
counts.items[p.idx] += 1;
}
}
print("{s}\n", .{startLine});
for (counts.items) |val| {
print("{}", .{val});
}
print("\n", .{});
    const max_iteration: usize = 40;
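    // Rather than materializing the polymer (its length roughly doubles each
    // step), track how many times each adjacent pair occurs. One step turns
    // every occurrence of pair AB (rule "AB -> C") into one AC and one CB, so
    // the new pair counts are a linear function of the old ones. Letter totals
    // are then the counts of each pair's first letter, plus one for the final
    // letter of the polymer, which is tracked separately via `lastPair`.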
{
var lastPair = getPair(&pairs, startLine[startLine.len - 2 .. startLine.len]);
var iteration: u64 = 0;
while (iteration < max_iteration) : (iteration += 1) {
var old: []u64 = gpa.alloc(u64, counts.items.len) catch unreachable;
defer gpa.free(old);
lastPair = rules.getPtr(lastPair.idx).?.right;
std.mem.copy(u64, old, counts.items);
std.mem.set(u64, counts.items, 0);
for (old) |old_count, idx| {
const rule = rules.getPtr(idx).?.*;
counts.items[rule.left.idx] += old_count;
counts.items[rule.right.idx] += old_count;
}
}
for (counts.items) |count, idx| {
const rule = rules.getPtr(idx).?.*;
letter_counts[rule.parent.name[0]] += count;
}
letter_counts[lastPair.name[1]] += 1;
}
{
var let: u8 = 'A';
var maximum: u64 = 0;
        var minimum: u64 = std.math.maxInt(u64);
while (let <= 'Z') : (let += 1) {
if (letter_counts[let] != 0) {
maximum = max(maximum, letter_counts[let]);
minimum = min(minimum, letter_counts[let]);
}
print("{c}: {}\n", .{ let, letter_counts[let] });
}
print("{} {} {}\n", .{ maximum, minimum, maximum - minimum });
}
}
// Useful stdlib functions
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;
const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;
const min = std.math.min;
const min3 = std.math.min3;
const max = std.math.max;
const max3 = std.math.max3;
const print = std.debug.print;
const assert = std.debug.assert;
const sort = std.sort.sort;
const asc = std.sort.asc;
const desc = std.sort.desc; | src/day14.zig |
const std = @import("std");
const testing = std.testing;
const math = std.math;
const fixXfYi = @import("fixXfYi.zig").fixXfYi;
// Conversion from f32
const __fixsfsi = @import("fixXfYi.zig").__fixsfsi;
const __fixunssfsi = @import("fixXfYi.zig").__fixunssfsi;
const __fixsfdi = @import("fixXfYi.zig").__fixsfdi;
const __fixunssfdi = @import("fixXfYi.zig").__fixunssfdi;
const __fixsfti = @import("fixXfYi.zig").__fixsfti;
const __fixunssfti = @import("fixXfYi.zig").__fixunssfti;
// Conversion from f64
const __fixdfsi = @import("fixXfYi.zig").__fixdfsi;
const __fixunsdfsi = @import("fixXfYi.zig").__fixunsdfsi;
const __fixdfdi = @import("fixXfYi.zig").__fixdfdi;
const __fixunsdfdi = @import("fixXfYi.zig").__fixunsdfdi;
const __fixdfti = @import("fixXfYi.zig").__fixdfti;
const __fixunsdfti = @import("fixXfYi.zig").__fixunsdfti;
// Conversion from f128
const __fixtfsi = @import("fixXfYi.zig").__fixtfsi;
const __fixunstfsi = @import("fixXfYi.zig").__fixunstfsi;
const __fixtfdi = @import("fixXfYi.zig").__fixtfdi;
const __fixunstfdi = @import("fixXfYi.zig").__fixunstfdi;
const __fixtfti = @import("fixXfYi.zig").__fixtfti;
const __fixunstfti = @import("fixXfYi.zig").__fixunstfti;
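// Naming follows compiler-rt: __fix{s,d,t}f{s,d,t}i converts from f32/f64/f128
// ("sf"/"df"/"tf") to i32/i64/i128 ("si"/"di"/"ti"), and the "uns" variants
// target the unsigned integer of the same width. The expectations below also
// document the saturating behavior: inputs outside the target range clamp to
// minInt/maxInt (or to 0 for negative inputs of the unsigned conversions)
// instead of invoking undefined behavior.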
fn test__fixsfsi(a: f32, expected: i32) !void {
const x = __fixsfsi(a);
try testing.expect(x == expected);
}
fn test__fixunssfsi(a: f32, expected: u32) !void {
const x = __fixunssfsi(a);
try testing.expect(x == expected);
}
test "fixsfsi" {
try test__fixsfsi(-math.floatMax(f32), math.minInt(i32));
try test__fixsfsi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i32));
try test__fixsfsi(-0x1.FFFFFFFFFFFFFp+1023, -0x80000000);
try test__fixsfsi(-0x1.0000000000000p+127, -0x80000000);
try test__fixsfsi(-0x1.FFFFFFFFFFFFFp+126, -0x80000000);
try test__fixsfsi(-0x1.FFFFFFFFFFFFEp+126, -0x80000000);
try test__fixsfsi(-0x1.0000000000001p+63, -0x80000000);
try test__fixsfsi(-0x1.0000000000000p+63, -0x80000000);
try test__fixsfsi(-0x1.FFFFFFFFFFFFFp+62, -0x80000000);
try test__fixsfsi(-0x1.FFFFFFFFFFFFEp+62, -0x80000000);
try test__fixsfsi(-0x1.FFFFFEp+62, -0x80000000);
try test__fixsfsi(-0x1.FFFFFCp+62, -0x80000000);
try test__fixsfsi(-0x1.000000p+31, -0x80000000);
try test__fixsfsi(-0x1.FFFFFFp+30, -0x80000000);
try test__fixsfsi(-0x1.FFFFFEp+30, -0x7FFFFF80);
try test__fixsfsi(-0x1.FFFFFCp+30, -0x7FFFFF00);
try test__fixsfsi(-2.01, -2);
try test__fixsfsi(-2.0, -2);
try test__fixsfsi(-1.99, -1);
try test__fixsfsi(-1.0, -1);
try test__fixsfsi(-0.99, 0);
try test__fixsfsi(-0.5, 0);
try test__fixsfsi(-math.floatMin(f32), 0);
try test__fixsfsi(0.0, 0);
try test__fixsfsi(math.floatMin(f32), 0);
try test__fixsfsi(0.5, 0);
try test__fixsfsi(0.99, 0);
try test__fixsfsi(1.0, 1);
try test__fixsfsi(1.5, 1);
try test__fixsfsi(1.99, 1);
try test__fixsfsi(2.0, 2);
try test__fixsfsi(2.01, 2);
try test__fixsfsi(0x1.FFFFFCp+30, 0x7FFFFF00);
try test__fixsfsi(0x1.FFFFFEp+30, 0x7FFFFF80);
try test__fixsfsi(0x1.FFFFFFp+30, 0x7FFFFFFF);
try test__fixsfsi(0x1.000000p+31, 0x7FFFFFFF);
try test__fixsfsi(0x1.FFFFFCp+62, 0x7FFFFFFF);
try test__fixsfsi(0x1.FFFFFEp+62, 0x7FFFFFFF);
try test__fixsfsi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFF);
try test__fixsfsi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFF);
try test__fixsfsi(0x1.0000000000000p+63, 0x7FFFFFFF);
try test__fixsfsi(0x1.0000000000001p+63, 0x7FFFFFFF);
try test__fixsfsi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFF);
try test__fixsfsi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFF);
try test__fixsfsi(0x1.0000000000000p+127, 0x7FFFFFFF);
try test__fixsfsi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFF);
try test__fixsfsi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i32));
try test__fixsfsi(math.floatMax(f32), math.maxInt(i32));
}
test "fixunssfsi" {
try test__fixunssfsi(0.0, 0);
try test__fixunssfsi(0.5, 0);
try test__fixunssfsi(0.99, 0);
try test__fixunssfsi(1.0, 1);
try test__fixunssfsi(1.5, 1);
try test__fixunssfsi(1.99, 1);
try test__fixunssfsi(2.0, 2);
try test__fixunssfsi(2.01, 2);
try test__fixunssfsi(-0.5, 0);
try test__fixunssfsi(-0.99, 0);
try test__fixunssfsi(-1.0, 0);
try test__fixunssfsi(-1.5, 0);
try test__fixunssfsi(-1.99, 0);
try test__fixunssfsi(-2.0, 0);
try test__fixunssfsi(-2.01, 0);
try test__fixunssfsi(0x1.000000p+31, 0x80000000);
try test__fixunssfsi(0x1.000000p+32, 0xFFFFFFFF);
try test__fixunssfsi(0x1.FFFFFEp+31, 0xFFFFFF00);
try test__fixunssfsi(0x1.FFFFFEp+30, 0x7FFFFF80);
try test__fixunssfsi(0x1.FFFFFCp+30, 0x7FFFFF00);
try test__fixunssfsi(-0x1.FFFFFEp+30, 0);
try test__fixunssfsi(-0x1.FFFFFCp+30, 0);
}
fn test__fixsfdi(a: f32, expected: i64) !void {
const x = __fixsfdi(a);
try testing.expect(x == expected);
}
fn test__fixunssfdi(a: f32, expected: u64) !void {
const x = __fixunssfdi(a);
try testing.expect(x == expected);
}
test "fixsfdi" {
try test__fixsfdi(-math.floatMax(f32), math.minInt(i64));
try test__fixsfdi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i64));
try test__fixsfdi(-0x1.FFFFFFFFFFFFFp+1023, -0x8000000000000000);
try test__fixsfdi(-0x1.0000000000000p+127, -0x8000000000000000);
try test__fixsfdi(-0x1.FFFFFFFFFFFFFp+126, -0x8000000000000000);
try test__fixsfdi(-0x1.FFFFFFFFFFFFEp+126, -0x8000000000000000);
try test__fixsfdi(-0x1.0000000000001p+63, -0x8000000000000000);
try test__fixsfdi(-0x1.0000000000000p+63, -0x8000000000000000);
try test__fixsfdi(-0x1.FFFFFFFFFFFFFp+62, -0x8000000000000000);
try test__fixsfdi(-0x1.FFFFFFFFFFFFEp+62, -0x8000000000000000);
try test__fixsfdi(-0x1.FFFFFFp+62, -0x8000000000000000);
try test__fixsfdi(-0x1.FFFFFEp+62, -0x7fffff8000000000);
try test__fixsfdi(-0x1.FFFFFCp+62, -0x7fffff0000000000);
try test__fixsfdi(-2.01, -2);
try test__fixsfdi(-2.0, -2);
try test__fixsfdi(-1.99, -1);
try test__fixsfdi(-1.0, -1);
try test__fixsfdi(-0.99, 0);
try test__fixsfdi(-0.5, 0);
try test__fixsfdi(-math.floatMin(f32), 0);
try test__fixsfdi(0.0, 0);
try test__fixsfdi(math.floatMin(f32), 0);
try test__fixsfdi(0.5, 0);
try test__fixsfdi(0.99, 0);
try test__fixsfdi(1.0, 1);
try test__fixsfdi(1.5, 1);
try test__fixsfdi(1.99, 1);
try test__fixsfdi(2.0, 2);
try test__fixsfdi(2.01, 2);
try test__fixsfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
try test__fixsfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
try test__fixsfdi(0x1.FFFFFFp+62, 0x7FFFFFFFFFFFFFFF);
try test__fixsfdi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFFFFF);
try test__fixsfdi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFFFF);
try test__fixsfdi(0x1.0000000000000p+63, 0x7FFFFFFFFFFFFFFF);
try test__fixsfdi(0x1.0000000000001p+63, 0x7FFFFFFFFFFFFFFF);
try test__fixsfdi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFFFFF);
try test__fixsfdi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFFFF);
try test__fixsfdi(0x1.0000000000000p+127, 0x7FFFFFFFFFFFFFFF);
try test__fixsfdi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFFFFFFFFFF);
try test__fixsfdi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i64));
try test__fixsfdi(math.floatMax(f32), math.maxInt(i64));
}
test "fixunssfdi" {
try test__fixunssfdi(0.0, 0);
try test__fixunssfdi(0.5, 0);
try test__fixunssfdi(0.99, 0);
try test__fixunssfdi(1.0, 1);
try test__fixunssfdi(1.5, 1);
try test__fixunssfdi(1.99, 1);
try test__fixunssfdi(2.0, 2);
try test__fixunssfdi(2.01, 2);
try test__fixunssfdi(-0.5, 0);
try test__fixunssfdi(-0.99, 0);
try test__fixunssfdi(-1.0, 0);
try test__fixunssfdi(-1.5, 0);
try test__fixunssfdi(-1.99, 0);
try test__fixunssfdi(-2.0, 0);
try test__fixunssfdi(-2.01, 0);
try test__fixunssfdi(0x1.FFFFFEp+63, 0xFFFFFF0000000000);
try test__fixunssfdi(0x1.000000p+63, 0x8000000000000000);
try test__fixunssfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
try test__fixunssfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
try test__fixunssfdi(-0x1.FFFFFEp+62, 0x0000000000000000);
try test__fixunssfdi(-0x1.FFFFFCp+62, 0x0000000000000000);
}
fn test__fixsfti(a: f32, expected: i128) !void {
const x = __fixsfti(a);
try testing.expect(x == expected);
}
fn test__fixunssfti(a: f32, expected: u128) !void {
const x = __fixunssfti(a);
try testing.expect(x == expected);
}
test "fixsfti" {
try test__fixsfti(-math.floatMax(f32), math.minInt(i128));
try test__fixsfti(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i128));
try test__fixsfti(-0x1.FFFFFFFFFFFFFp+1023, -0x80000000000000000000000000000000);
try test__fixsfti(-0x1.0000000000000p+127, -0x80000000000000000000000000000000);
try test__fixsfti(-0x1.FFFFFFFFFFFFFp+126, -0x80000000000000000000000000000000);
try test__fixsfti(-0x1.FFFFFFFFFFFFEp+126, -0x80000000000000000000000000000000);
try test__fixsfti(-0x1.FFFFFF0000000p+126, -0x80000000000000000000000000000000);
try test__fixsfti(-0x1.FFFFFE0000000p+126, -0x7FFFFF80000000000000000000000000);
try test__fixsfti(-0x1.FFFFFC0000000p+126, -0x7FFFFF00000000000000000000000000);
try test__fixsfti(-0x1.0000000000001p+63, -0x8000000000000000);
try test__fixsfti(-0x1.0000000000000p+63, -0x8000000000000000);
try test__fixsfti(-0x1.FFFFFFFFFFFFFp+62, -0x8000000000000000);
try test__fixsfti(-0x1.FFFFFFFFFFFFEp+62, -0x8000000000000000);
try test__fixsfti(-0x1.FFFFFFp+62, -0x8000000000000000);
try test__fixsfti(-0x1.FFFFFEp+62, -0x7fffff8000000000);
try test__fixsfti(-0x1.FFFFFCp+62, -0x7fffff0000000000);
try test__fixsfti(-0x1.000000p+31, -0x80000000);
try test__fixsfti(-0x1.FFFFFFp+30, -0x80000000);
try test__fixsfti(-0x1.FFFFFEp+30, -0x7FFFFF80);
try test__fixsfti(-0x1.FFFFFCp+30, -0x7FFFFF00);
try test__fixsfti(-2.01, -2);
try test__fixsfti(-2.0, -2);
try test__fixsfti(-1.99, -1);
try test__fixsfti(-1.0, -1);
try test__fixsfti(-0.99, 0);
try test__fixsfti(-0.5, 0);
try test__fixsfti(-math.floatMin(f32), 0);
try test__fixsfti(0.0, 0);
try test__fixsfti(math.floatMin(f32), 0);
try test__fixsfti(0.5, 0);
try test__fixsfti(0.99, 0);
try test__fixsfti(1.0, 1);
try test__fixsfti(1.5, 1);
try test__fixsfti(1.99, 1);
try test__fixsfti(2.0, 2);
try test__fixsfti(2.01, 2);
try test__fixsfti(0x1.FFFFFCp+30, 0x7FFFFF00);
try test__fixsfti(0x1.FFFFFEp+30, 0x7FFFFF80);
try test__fixsfti(0x1.FFFFFFp+30, 0x80000000);
try test__fixsfti(0x1.000000p+31, 0x80000000);
try test__fixsfti(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
try test__fixsfti(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
try test__fixsfti(0x1.FFFFFFp+62, 0x8000000000000000);
try test__fixsfti(0x1.FFFFFFFFFFFFEp+62, 0x8000000000000000);
try test__fixsfti(0x1.FFFFFFFFFFFFFp+62, 0x8000000000000000);
try test__fixsfti(0x1.0000000000000p+63, 0x8000000000000000);
try test__fixsfti(0x1.0000000000001p+63, 0x8000000000000000);
try test__fixsfti(0x1.FFFFFC0000000p+126, 0x7FFFFF00000000000000000000000000);
try test__fixsfti(0x1.FFFFFE0000000p+126, 0x7FFFFF80000000000000000000000000);
try test__fixsfti(0x1.FFFFFF0000000p+126, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
try test__fixsfti(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
try test__fixsfti(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
try test__fixsfti(0x1.0000000000000p+127, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
try test__fixsfti(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
try test__fixsfti(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i128));
try test__fixsfti(math.floatMax(f32), math.maxInt(i128));
}
test "fixunssfti" {
try test__fixunssfti(0.0, 0);
try test__fixunssfti(0.5, 0);
try test__fixunssfti(0.99, 0);
try test__fixunssfti(1.0, 1);
try test__fixunssfti(1.5, 1);
try test__fixunssfti(1.99, 1);
try test__fixunssfti(2.0, 2);
try test__fixunssfti(2.01, 2);
try test__fixunssfti(-0.5, 0);
try test__fixunssfti(-0.99, 0);
try test__fixunssfti(-1.0, 0);
try test__fixunssfti(-1.5, 0);
try test__fixunssfti(-1.99, 0);
try test__fixunssfti(-2.0, 0);
try test__fixunssfti(-2.01, 0);
try test__fixunssfti(0x1.FFFFFEp+63, 0xFFFFFF0000000000);
try test__fixunssfti(0x1.000000p+63, 0x8000000000000000);
try test__fixunssfti(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
try test__fixunssfti(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
try test__fixunssfti(0x1.FFFFFEp+127, 0xFFFFFF00000000000000000000000000);
try test__fixunssfti(0x1.000000p+127, 0x80000000000000000000000000000000);
try test__fixunssfti(0x1.FFFFFEp+126, 0x7FFFFF80000000000000000000000000);
try test__fixunssfti(0x1.FFFFFCp+126, 0x7FFFFF00000000000000000000000000);
try test__fixunssfti(-0x1.FFFFFEp+62, 0x0000000000000000);
try test__fixunssfti(-0x1.FFFFFCp+62, 0x0000000000000000);
try test__fixunssfti(-0x1.FFFFFEp+126, 0x0000000000000000);
try test__fixunssfti(-0x1.FFFFFCp+126, 0x0000000000000000);
try test__fixunssfti(math.floatMax(f32), 0xffffff00000000000000000000000000);
try test__fixunssfti(math.inf(f32), math.maxInt(u128));
}
fn test__fixdfsi(a: f64, expected: i32) !void {
const x = __fixdfsi(a);
try testing.expect(x == expected);
}
fn test__fixunsdfsi(a: f64, expected: u32) !void {
const x = __fixunsdfsi(a);
try testing.expect(x == expected);
}
test "fixdfsi" {
try test__fixdfsi(-math.floatMax(f64), math.minInt(i32));
try test__fixdfsi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i32));
try test__fixdfsi(-0x1.FFFFFFFFFFFFFp+1023, -0x80000000);
try test__fixdfsi(-0x1.0000000000000p+127, -0x80000000);
try test__fixdfsi(-0x1.FFFFFFFFFFFFFp+126, -0x80000000);
try test__fixdfsi(-0x1.FFFFFFFFFFFFEp+126, -0x80000000);
try test__fixdfsi(-0x1.0000000000001p+63, -0x80000000);
try test__fixdfsi(-0x1.0000000000000p+63, -0x80000000);
try test__fixdfsi(-0x1.FFFFFFFFFFFFFp+62, -0x80000000);
try test__fixdfsi(-0x1.FFFFFFFFFFFFEp+62, -0x80000000);
try test__fixdfsi(-0x1.FFFFFEp+62, -0x80000000);
try test__fixdfsi(-0x1.FFFFFCp+62, -0x80000000);
try test__fixdfsi(-0x1.000000p+31, -0x80000000);
try test__fixdfsi(-0x1.FFFFFFp+30, -0x7FFFFFC0);
try test__fixdfsi(-0x1.FFFFFEp+30, -0x7FFFFF80);
try test__fixdfsi(-2.01, -2);
try test__fixdfsi(-2.0, -2);
try test__fixdfsi(-1.99, -1);
try test__fixdfsi(-1.0, -1);
try test__fixdfsi(-0.99, 0);
try test__fixdfsi(-0.5, 0);
try test__fixdfsi(-math.floatMin(f64), 0);
try test__fixdfsi(0.0, 0);
try test__fixdfsi(math.floatMin(f64), 0);
try test__fixdfsi(0.5, 0);
try test__fixdfsi(0.99, 0);
try test__fixdfsi(1.0, 1);
try test__fixdfsi(1.5, 1);
try test__fixdfsi(1.99, 1);
try test__fixdfsi(2.0, 2);
try test__fixdfsi(2.01, 2);
try test__fixdfsi(0x1.FFFFFEp+30, 0x7FFFFF80);
try test__fixdfsi(0x1.FFFFFFp+30, 0x7FFFFFC0);
try test__fixdfsi(0x1.000000p+31, 0x7FFFFFFF);
try test__fixdfsi(0x1.FFFFFCp+62, 0x7FFFFFFF);
try test__fixdfsi(0x1.FFFFFEp+62, 0x7FFFFFFF);
try test__fixdfsi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFF);
try test__fixdfsi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFF);
try test__fixdfsi(0x1.0000000000000p+63, 0x7FFFFFFF);
try test__fixdfsi(0x1.0000000000001p+63, 0x7FFFFFFF);
try test__fixdfsi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFF);
try test__fixdfsi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFF);
try test__fixdfsi(0x1.0000000000000p+127, 0x7FFFFFFF);
try test__fixdfsi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFF);
try test__fixdfsi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i32));
try test__fixdfsi(math.floatMax(f64), math.maxInt(i32));
}
test "fixunsdfsi" {
try test__fixunsdfsi(0.0, 0);
try test__fixunsdfsi(0.5, 0);
try test__fixunsdfsi(0.99, 0);
try test__fixunsdfsi(1.0, 1);
try test__fixunsdfsi(1.5, 1);
try test__fixunsdfsi(1.99, 1);
try test__fixunsdfsi(2.0, 2);
try test__fixunsdfsi(2.01, 2);
try test__fixunsdfsi(-0.5, 0);
try test__fixunsdfsi(-0.99, 0);
try test__fixunsdfsi(-1.0, 0);
try test__fixunsdfsi(-1.5, 0);
try test__fixunsdfsi(-1.99, 0);
try test__fixunsdfsi(-2.0, 0);
try test__fixunsdfsi(-2.01, 0);
try test__fixunsdfsi(0x1.000000p+31, 0x80000000);
try test__fixunsdfsi(0x1.000000p+32, 0xFFFFFFFF);
try test__fixunsdfsi(0x1.FFFFFEp+31, 0xFFFFFF00);
try test__fixunsdfsi(0x1.FFFFFEp+30, 0x7FFFFF80);
try test__fixunsdfsi(0x1.FFFFFCp+30, 0x7FFFFF00);
try test__fixunsdfsi(-0x1.FFFFFEp+30, 0);
try test__fixunsdfsi(-0x1.FFFFFCp+30, 0);
try test__fixunsdfsi(0x1.FFFFFFFEp+31, 0xFFFFFFFF);
try test__fixunsdfsi(0x1.FFFFFFFC00000p+30, 0x7FFFFFFF);
try test__fixunsdfsi(0x1.FFFFFFF800000p+30, 0x7FFFFFFE);
}
fn test__fixdfdi(a: f64, expected: i64) !void {
const x = __fixdfdi(a);
try testing.expect(x == expected);
}
fn test__fixunsdfdi(a: f64, expected: u64) !void {
const x = __fixunsdfdi(a);
try testing.expect(x == expected);
}
test "fixdfdi" {
try test__fixdfdi(-math.floatMax(f64), math.minInt(i64));
try test__fixdfdi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i64));
try test__fixdfdi(-0x1.FFFFFFFFFFFFFp+1023, -0x8000000000000000);
try test__fixdfdi(-0x1.0000000000000p+127, -0x8000000000000000);
try test__fixdfdi(-0x1.FFFFFFFFFFFFFp+126, -0x8000000000000000);
try test__fixdfdi(-0x1.FFFFFFFFFFFFEp+126, -0x8000000000000000);
try test__fixdfdi(-0x1.0000000000001p+63, -0x8000000000000000);
try test__fixdfdi(-0x1.0000000000000p+63, -0x8000000000000000);
try test__fixdfdi(-0x1.FFFFFFFFFFFFFp+62, -0x7FFFFFFFFFFFFC00);
try test__fixdfdi(-0x1.FFFFFFFFFFFFEp+62, -0x7FFFFFFFFFFFF800);
try test__fixdfdi(-0x1.FFFFFEp+62, -0x7fffff8000000000);
try test__fixdfdi(-0x1.FFFFFCp+62, -0x7fffff0000000000);
try test__fixdfdi(-2.01, -2);
try test__fixdfdi(-2.0, -2);
try test__fixdfdi(-1.99, -1);
try test__fixdfdi(-1.0, -1);
try test__fixdfdi(-0.99, 0);
try test__fixdfdi(-0.5, 0);
try test__fixdfdi(-math.floatMin(f64), 0);
try test__fixdfdi(0.0, 0);
try test__fixdfdi(math.floatMin(f64), 0);
try test__fixdfdi(0.5, 0);
try test__fixdfdi(0.99, 0);
try test__fixdfdi(1.0, 1);
try test__fixdfdi(1.5, 1);
try test__fixdfdi(1.99, 1);
try test__fixdfdi(2.0, 2);
try test__fixdfdi(2.01, 2);
try test__fixdfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
try test__fixdfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
try test__fixdfdi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
try test__fixdfdi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
try test__fixdfdi(0x1.0000000000000p+63, 0x7FFFFFFFFFFFFFFF);
try test__fixdfdi(0x1.0000000000001p+63, 0x7FFFFFFFFFFFFFFF);
try test__fixdfdi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFFFFF);
try test__fixdfdi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFFFF);
try test__fixdfdi(0x1.0000000000000p+127, 0x7FFFFFFFFFFFFFFF);
try test__fixdfdi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFFFFFFFFFF);
try test__fixdfdi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i64));
try test__fixdfdi(math.floatMax(f64), math.maxInt(i64));
}
test "fixunsdfdi" {
try test__fixunsdfdi(0.0, 0);
try test__fixunsdfdi(0.5, 0);
try test__fixunsdfdi(0.99, 0);
try test__fixunsdfdi(1.0, 1);
try test__fixunsdfdi(1.5, 1);
try test__fixunsdfdi(1.99, 1);
try test__fixunsdfdi(2.0, 2);
try test__fixunsdfdi(2.01, 2);
try test__fixunsdfdi(-0.5, 0);
try test__fixunsdfdi(-0.99, 0);
try test__fixunsdfdi(-1.0, 0);
try test__fixunsdfdi(-1.5, 0);
try test__fixunsdfdi(-1.99, 0);
try test__fixunsdfdi(-2.0, 0);
try test__fixunsdfdi(-2.01, 0);
try test__fixunsdfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
try test__fixunsdfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
try test__fixunsdfdi(-0x1.FFFFFEp+62, 0);
try test__fixunsdfdi(-0x1.FFFFFCp+62, 0);
try test__fixunsdfdi(0x1.FFFFFFFFFFFFFp+63, 0xFFFFFFFFFFFFF800);
try test__fixunsdfdi(0x1.0000000000000p+63, 0x8000000000000000);
try test__fixunsdfdi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
try test__fixunsdfdi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
try test__fixunsdfdi(-0x1.FFFFFFFFFFFFFp+62, 0);
try test__fixunsdfdi(-0x1.FFFFFFFFFFFFEp+62, 0);
}
fn test__fixdfti(a: f64, expected: i128) !void {
const x = __fixdfti(a);
try testing.expect(x == expected);
}
fn test__fixunsdfti(a: f64, expected: u128) !void {
const x = __fixunsdfti(a);
try testing.expect(x == expected);
}
test "fixdfti" {
try test__fixdfti(-math.floatMax(f64), math.minInt(i128));
try test__fixdfti(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i128));
try test__fixdfti(-0x1.FFFFFFFFFFFFFp+1023, -0x80000000000000000000000000000000);
try test__fixdfti(-0x1.0000000000000p+127, -0x80000000000000000000000000000000);
try test__fixdfti(-0x1.FFFFFFFFFFFFFp+126, -0x7FFFFFFFFFFFFC000000000000000000);
try test__fixdfti(-0x1.FFFFFFFFFFFFEp+126, -0x7FFFFFFFFFFFF8000000000000000000);
try test__fixdfti(-0x1.0000000000001p+63, -0x8000000000000800);
try test__fixdfti(-0x1.0000000000000p+63, -0x8000000000000000);
try test__fixdfti(-0x1.FFFFFFFFFFFFFp+62, -0x7FFFFFFFFFFFFC00);
try test__fixdfti(-0x1.FFFFFFFFFFFFEp+62, -0x7FFFFFFFFFFFF800);
try test__fixdfti(-0x1.FFFFFEp+62, -0x7fffff8000000000);
try test__fixdfti(-0x1.FFFFFCp+62, -0x7fffff0000000000);
try test__fixdfti(-2.01, -2);
try test__fixdfti(-2.0, -2);
try test__fixdfti(-1.99, -1);
try test__fixdfti(-1.0, -1);
try test__fixdfti(-0.99, 0);
try test__fixdfti(-0.5, 0);
try test__fixdfti(-math.floatMin(f64), 0);
try test__fixdfti(0.0, 0);
try test__fixdfti(math.floatMin(f64), 0);
try test__fixdfti(0.5, 0);
try test__fixdfti(0.99, 0);
try test__fixdfti(1.0, 1);
try test__fixdfti(1.5, 1);
try test__fixdfti(1.99, 1);
try test__fixdfti(2.0, 2);
try test__fixdfti(2.01, 2);
try test__fixdfti(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
try test__fixdfti(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
try test__fixdfti(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
try test__fixdfti(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
try test__fixdfti(0x1.0000000000000p+63, 0x8000000000000000);
try test__fixdfti(0x1.0000000000001p+63, 0x8000000000000800);
try test__fixdfti(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFF8000000000000000000);
try test__fixdfti(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFC000000000000000000);
try test__fixdfti(0x1.0000000000000p+127, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
try test__fixdfti(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
try test__fixdfti(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i128));
try test__fixdfti(math.floatMax(f64), math.maxInt(i128));
}
test "fixunsdfti" {
try test__fixunsdfti(0.0, 0);
try test__fixunsdfti(0.5, 0);
try test__fixunsdfti(0.99, 0);
try test__fixunsdfti(1.0, 1);
try test__fixunsdfti(1.5, 1);
try test__fixunsdfti(1.99, 1);
try test__fixunsdfti(2.0, 2);
try test__fixunsdfti(2.01, 2);
try test__fixunsdfti(-0.5, 0);
try test__fixunsdfti(-0.99, 0);
try test__fixunsdfti(-1.0, 0);
try test__fixunsdfti(-1.5, 0);
try test__fixunsdfti(-1.99, 0);
try test__fixunsdfti(-2.0, 0);
try test__fixunsdfti(-2.01, 0);
try test__fixunsdfti(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
try test__fixunsdfti(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
try test__fixunsdfti(-0x1.FFFFFEp+62, 0);
try test__fixunsdfti(-0x1.FFFFFCp+62, 0);
try test__fixunsdfti(0x1.FFFFFFFFFFFFFp+63, 0xFFFFFFFFFFFFF800);
try test__fixunsdfti(0x1.0000000000000p+63, 0x8000000000000000);
try test__fixunsdfti(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
try test__fixunsdfti(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
try test__fixunsdfti(0x1.FFFFFFFFFFFFFp+127, 0xFFFFFFFFFFFFF8000000000000000000);
try test__fixunsdfti(0x1.0000000000000p+127, 0x80000000000000000000000000000000);
try test__fixunsdfti(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFC000000000000000000);
try test__fixunsdfti(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFF8000000000000000000);
try test__fixunsdfti(0x1.0000000000000p+128, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
try test__fixunsdfti(-0x1.FFFFFFFFFFFFFp+62, 0);
try test__fixunsdfti(-0x1.FFFFFFFFFFFFEp+62, 0);
}
fn test__fixtfsi(a: f128, expected: i32) !void {
const x = __fixtfsi(a);
try testing.expect(x == expected);
}
fn test__fixunstfsi(a: f128, expected: u32) !void {
const x = __fixunstfsi(a);
try testing.expect(x == expected);
}
test "fixtfsi" {
try test__fixtfsi(-math.floatMax(f128), math.minInt(i32));
try test__fixtfsi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i32));
try test__fixtfsi(-0x1.FFFFFFFFFFFFFp+1023, -0x80000000);
try test__fixtfsi(-0x1.0000000000000p+127, -0x80000000);
try test__fixtfsi(-0x1.FFFFFFFFFFFFFp+126, -0x80000000);
try test__fixtfsi(-0x1.FFFFFFFFFFFFEp+126, -0x80000000);
try test__fixtfsi(-0x1.0000000000001p+63, -0x80000000);
try test__fixtfsi(-0x1.0000000000000p+63, -0x80000000);
try test__fixtfsi(-0x1.FFFFFFFFFFFFFp+62, -0x80000000);
try test__fixtfsi(-0x1.FFFFFFFFFFFFEp+62, -0x80000000);
try test__fixtfsi(-0x1.FFFFFEp+62, -0x80000000);
try test__fixtfsi(-0x1.FFFFFCp+62, -0x80000000);
try test__fixtfsi(-0x1.000000p+31, -0x80000000);
try test__fixtfsi(-0x1.FFFFFFp+30, -0x7FFFFFC0);
try test__fixtfsi(-0x1.FFFFFEp+30, -0x7FFFFF80);
try test__fixtfsi(-0x1.FFFFFCp+30, -0x7FFFFF00);
try test__fixtfsi(-2.01, -2);
try test__fixtfsi(-2.0, -2);
try test__fixtfsi(-1.99, -1);
try test__fixtfsi(-1.0, -1);
try test__fixtfsi(-0.99, 0);
try test__fixtfsi(-0.5, 0);
try test__fixtfsi(-math.floatMin(f32), 0);
try test__fixtfsi(0.0, 0);
try test__fixtfsi(math.floatMin(f32), 0);
try test__fixtfsi(0.5, 0);
try test__fixtfsi(0.99, 0);
try test__fixtfsi(1.0, 1);
try test__fixtfsi(1.5, 1);
try test__fixtfsi(1.99, 1);
try test__fixtfsi(2.0, 2);
try test__fixtfsi(2.01, 2);
try test__fixtfsi(0x1.FFFFFCp+30, 0x7FFFFF00);
try test__fixtfsi(0x1.FFFFFEp+30, 0x7FFFFF80);
try test__fixtfsi(0x1.FFFFFFp+30, 0x7FFFFFC0);
try test__fixtfsi(0x1.000000p+31, 0x7FFFFFFF);
try test__fixtfsi(0x1.FFFFFCp+62, 0x7FFFFFFF);
try test__fixtfsi(0x1.FFFFFEp+62, 0x7FFFFFFF);
try test__fixtfsi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFF);
try test__fixtfsi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFF);
try test__fixtfsi(0x1.0000000000000p+63, 0x7FFFFFFF);
try test__fixtfsi(0x1.0000000000001p+63, 0x7FFFFFFF);
try test__fixtfsi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFF);
try test__fixtfsi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFF);
try test__fixtfsi(0x1.0000000000000p+127, 0x7FFFFFFF);
try test__fixtfsi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFF);
try test__fixtfsi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i32));
try test__fixtfsi(math.floatMax(f128), math.maxInt(i32));
}
test "fixunstfsi" {
try test__fixunstfsi(math.inf(f128), 0xffffffff);
try test__fixunstfsi(0, 0x0);
try test__fixunstfsi(0x1.23456789abcdefp+5, 0x24);
try test__fixunstfsi(0x1.23456789abcdefp-3, 0x0);
try test__fixunstfsi(0x1.23456789abcdefp+20, 0x123456);
try test__fixunstfsi(0x1.23456789abcdefp+40, 0xffffffff);
try test__fixunstfsi(0x1.23456789abcdefp+256, 0xffffffff);
try test__fixunstfsi(-0x1.23456789abcdefp+3, 0x0);
try test__fixunstfsi(0x1p+32, 0xFFFFFFFF);
}
fn test__fixtfdi(a: f128, expected: i64) !void {
const x = __fixtfdi(a);
try testing.expect(x == expected);
}
fn test__fixunstfdi(a: f128, expected: u64) !void {
const x = __fixunstfdi(a);
try testing.expect(x == expected);
}
test "fixtfdi" {
try test__fixtfdi(-math.floatMax(f128), math.minInt(i64));
try test__fixtfdi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i64));
try test__fixtfdi(-0x1.FFFFFFFFFFFFFp+1023, -0x8000000000000000);
try test__fixtfdi(-0x1.0000000000000p+127, -0x8000000000000000);
try test__fixtfdi(-0x1.FFFFFFFFFFFFFp+126, -0x8000000000000000);
try test__fixtfdi(-0x1.FFFFFFFFFFFFEp+126, -0x8000000000000000);
try test__fixtfdi(-0x1.0000000000001p+63, -0x8000000000000000);
try test__fixtfdi(-0x1.0000000000000p+63, -0x8000000000000000);
try test__fixtfdi(-0x1.FFFFFFFFFFFFFp+62, -0x7FFFFFFFFFFFFC00);
try test__fixtfdi(-0x1.FFFFFFFFFFFFEp+62, -0x7FFFFFFFFFFFF800);
try test__fixtfdi(-0x1.FFFFFEp+62, -0x7FFFFF8000000000);
try test__fixtfdi(-0x1.FFFFFCp+62, -0x7FFFFF0000000000);
try test__fixtfdi(-0x1.000000p+31, -0x80000000);
try test__fixtfdi(-0x1.FFFFFFp+30, -0x7FFFFFC0);
try test__fixtfdi(-0x1.FFFFFEp+30, -0x7FFFFF80);
try test__fixtfdi(-0x1.FFFFFCp+30, -0x7FFFFF00);
try test__fixtfdi(-2.01, -2);
try test__fixtfdi(-2.0, -2);
try test__fixtfdi(-1.99, -1);
try test__fixtfdi(-1.0, -1);
try test__fixtfdi(-0.99, 0);
try test__fixtfdi(-0.5, 0);
try test__fixtfdi(-math.floatMin(f64), 0);
try test__fixtfdi(0.0, 0);
try test__fixtfdi(math.floatMin(f64), 0);
try test__fixtfdi(0.5, 0);
try test__fixtfdi(0.99, 0);
try test__fixtfdi(1.0, 1);
try test__fixtfdi(1.5, 1);
try test__fixtfdi(1.99, 1);
try test__fixtfdi(2.0, 2);
try test__fixtfdi(2.01, 2);
try test__fixtfdi(0x1.FFFFFCp+30, 0x7FFFFF00);
try test__fixtfdi(0x1.FFFFFEp+30, 0x7FFFFF80);
try test__fixtfdi(0x1.FFFFFFp+30, 0x7FFFFFC0);
try test__fixtfdi(0x1.000000p+31, 0x80000000);
try test__fixtfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
try test__fixtfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
try test__fixtfdi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
try test__fixtfdi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
try test__fixtfdi(0x1.0000000000000p+63, 0x7FFFFFFFFFFFFFFF);
try test__fixtfdi(0x1.0000000000001p+63, 0x7FFFFFFFFFFFFFFF);
try test__fixtfdi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFFFFF);
try test__fixtfdi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFFFF);
try test__fixtfdi(0x1.0000000000000p+127, 0x7FFFFFFFFFFFFFFF);
try test__fixtfdi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFFFFFFFFFF);
try test__fixtfdi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i64));
try test__fixtfdi(math.floatMax(f128), math.maxInt(i64));
}
test "fixunstfdi" {
try test__fixunstfdi(0.0, 0);
try test__fixunstfdi(0.5, 0);
try test__fixunstfdi(0.99, 0);
try test__fixunstfdi(1.0, 1);
try test__fixunstfdi(1.5, 1);
try test__fixunstfdi(1.99, 1);
try test__fixunstfdi(2.0, 2);
try test__fixunstfdi(2.01, 2);
try test__fixunstfdi(-0.5, 0);
try test__fixunstfdi(-0.99, 0);
try test__fixunstfdi(-1.0, 0);
try test__fixunstfdi(-1.5, 0);
try test__fixunstfdi(-1.99, 0);
try test__fixunstfdi(-2.0, 0);
try test__fixunstfdi(-2.01, 0);
try test__fixunstfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
try test__fixunstfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
try test__fixunstfdi(-0x1.FFFFFEp+62, 0);
try test__fixunstfdi(-0x1.FFFFFCp+62, 0);
try test__fixunstfdi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
try test__fixunstfdi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
try test__fixunstfdi(-0x1.FFFFFFFFFFFFFp+62, 0);
try test__fixunstfdi(-0x1.FFFFFFFFFFFFEp+62, 0);
try test__fixunstfdi(0x1.FFFFFFFFFFFFFFFEp+63, 0xFFFFFFFFFFFFFFFF);
try test__fixunstfdi(0x1.0000000000000002p+63, 0x8000000000000001);
try test__fixunstfdi(0x1.0000000000000000p+63, 0x8000000000000000);
try test__fixunstfdi(0x1.FFFFFFFFFFFFFFFCp+62, 0x7FFFFFFFFFFFFFFF);
try test__fixunstfdi(0x1.FFFFFFFFFFFFFFF8p+62, 0x7FFFFFFFFFFFFFFE);
try test__fixunstfdi(0x1p+64, 0xFFFFFFFFFFFFFFFF);
try test__fixunstfdi(-0x1.0000000000000000p+63, 0);
try test__fixunstfdi(-0x1.FFFFFFFFFFFFFFFCp+62, 0);
try test__fixunstfdi(-0x1.FFFFFFFFFFFFFFF8p+62, 0);
}
fn test__fixtfti(a: f128, expected: i128) !void {
const x = __fixtfti(a);
try testing.expect(x == expected);
}
fn test__fixunstfti(a: f128, expected: u128) !void {
const x = __fixunstfti(a);
try testing.expect(x == expected);
}
test "fixtfti" {
try test__fixtfti(-math.floatMax(f128), math.minInt(i128));
try test__fixtfti(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i128));
try test__fixtfti(-0x1.FFFFFFFFFFFFFp+1023, -0x80000000000000000000000000000000);
try test__fixtfti(-0x1.0000000000000p+127, -0x80000000000000000000000000000000);
try test__fixtfti(-0x1.FFFFFFFFFFFFFp+126, -0x7FFFFFFFFFFFFC000000000000000000);
try test__fixtfti(-0x1.FFFFFFFFFFFFEp+126, -0x7FFFFFFFFFFFF8000000000000000000);
try test__fixtfti(-0x1.0000000000001p+63, -0x8000000000000800);
try test__fixtfti(-0x1.0000000000000p+63, -0x8000000000000000);
try test__fixtfti(-0x1.FFFFFFFFFFFFFp+62, -0x7FFFFFFFFFFFFC00);
try test__fixtfti(-0x1.FFFFFFFFFFFFEp+62, -0x7FFFFFFFFFFFF800);
try test__fixtfti(-0x1.FFFFFEp+62, -0x7fffff8000000000);
try test__fixtfti(-0x1.FFFFFCp+62, -0x7fffff0000000000);
try test__fixtfti(-2.01, -2);
try test__fixtfti(-2.0, -2);
try test__fixtfti(-1.99, -1);
try test__fixtfti(-1.0, -1);
try test__fixtfti(-0.99, 0);
try test__fixtfti(-0.5, 0);
try test__fixtfti(-math.floatMin(f128), 0);
try test__fixtfti(0.0, 0);
try test__fixtfti(math.floatMin(f128), 0);
try test__fixtfti(0.5, 0);
try test__fixtfti(0.99, 0);
try test__fixtfti(1.0, 1);
try test__fixtfti(1.5, 1);
try test__fixtfti(1.99, 1);
try test__fixtfti(2.0, 2);
try test__fixtfti(2.01, 2);
try test__fixtfti(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
try test__fixtfti(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
try test__fixtfti(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
try test__fixtfti(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
try test__fixtfti(0x1.0000000000000p+63, 0x8000000000000000);
try test__fixtfti(0x1.0000000000001p+63, 0x8000000000000800);
try test__fixtfti(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFF8000000000000000000);
try test__fixtfti(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFC000000000000000000);
try test__fixtfti(0x1.0000000000000p+127, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
try test__fixtfti(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
try test__fixtfti(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i128));
try test__fixtfti(math.floatMax(f128), math.maxInt(i128));
}
test "fixunstfti" {
try test__fixunstfti(math.inf(f128), 0xffffffffffffffffffffffffffffffff);
try test__fixunstfti(0.0, 0);
try test__fixunstfti(0.5, 0);
try test__fixunstfti(0.99, 0);
try test__fixunstfti(1.0, 1);
try test__fixunstfti(1.5, 1);
try test__fixunstfti(1.99, 1);
try test__fixunstfti(2.0, 2);
try test__fixunstfti(2.01, 2);
try test__fixunstfti(-0.01, 0);
try test__fixunstfti(-0.99, 0);
try test__fixunstfti(0x1p+128, 0xffffffffffffffffffffffffffffffff);
try test__fixunstfti(0x1.FFFFFEp+126, 0x7fffff80000000000000000000000000);
try test__fixunstfti(0x1.FFFFFEp+127, 0xffffff00000000000000000000000000);
try test__fixunstfti(0x1.FFFFFEp+128, 0xffffffffffffffffffffffffffffffff);
try test__fixunstfti(0x1.FFFFFEp+129, 0xffffffffffffffffffffffffffffffff);
}
fn test__fixunshfti(a: f16, expected: u128) !void {
const x = fixXfYi(u128, a);
try testing.expect(x == expected);
}
test "fixXfYi for f16" {
try test__fixunshfti(math.inf(f16), math.maxInt(u128));
try test__fixunshfti(math.floatMax(f16), 65504);
}
fn test__fixunsxfti(a: f80, expected: u128) !void {
const x = fixXfYi(u128, a);
try testing.expect(x == expected);
}
test "fixXfYi for f80" {
try test__fixunsxfti(math.inf(f80), math.maxInt(u128));
try test__fixunsxfti(math.floatMax(f80), math.maxInt(u128));
try test__fixunsxfti(math.maxInt(u64), math.maxInt(u64));
} | lib/std/special/compiler_rt/fixXfYi_test.zig |
const std = @import("std.zig");
const tokenizer = @import("zig/tokenizer.zig");
const fmt = @import("zig/fmt.zig");
const assert = std.debug.assert;
pub const Token = tokenizer.Token;
pub const Tokenizer = tokenizer.Tokenizer;
pub const fmtId = fmt.fmtId;
pub const fmtEscapes = fmt.fmtEscapes;
pub const isValidId = fmt.isValidId;
pub const parse = @import("zig/parse.zig").parse;
pub const string_literal = @import("zig/string_literal.zig");
pub const Ast = @import("zig/Ast.zig");
pub const system = @import("zig/system.zig");
pub const CrossTarget = @import("zig/CrossTarget.zig");
// Character literal parsing
pub const ParsedCharLiteral = string_literal.ParsedCharLiteral;
pub const parseCharLiteral = string_literal.parseCharLiteral;
// Files needed by translate-c.
pub const c_builtins = @import("zig/c_builtins.zig");
pub const c_translation = @import("zig/c_translation.zig");
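/// A 16-byte Blake3 digest of source bytes, used to detect source changes.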
pub const SrcHash = [16]u8;
pub fn hashSrc(src: []const u8) SrcHash {
var out: SrcHash = undefined;
std.crypto.hash.Blake3.hash(src, &out, .{});
return out;
}
pub fn srcHashEql(a: SrcHash, b: SrcHash) bool {
return @bitCast(u128, a) == @bitCast(u128, b);
}
pub fn hashName(parent_hash: SrcHash, sep: []const u8, name: []const u8) SrcHash {
var out: SrcHash = undefined;
var hasher = std.crypto.hash.Blake3.init(.{});
hasher.update(&parent_hash);
hasher.update(sep);
hasher.update(name);
hasher.final(&out);
return out;
}
pub const Loc = struct {
line: usize,
column: usize,
/// Does not include the trailing newline.
source_line: []const u8,
};
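/// Scans `source` up to `byte_offset`, counting newlines, and returns the
/// 0-based line/column of the offset along with the full text of its line.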
pub fn findLineColumn(source: []const u8, byte_offset: usize) Loc {
var line: usize = 0;
var column: usize = 0;
var line_start: usize = 0;
var i: usize = 0;
while (i < byte_offset) : (i += 1) {
switch (source[i]) {
'\n' => {
line += 1;
column = 0;
line_start = i + 1;
},
else => {
column += 1;
},
}
}
while (i < source.len and source[i] != '\n') {
i += 1;
}
return .{
.line = line,
.column = column,
.source_line = source[line_start..i],
};
}
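// Example (illustrative): in "hello\nworld\n", byte offset 7 is the first
// 'o' of "world", i.e. line 1, column 1 (both 0-based).
test "findLineColumn" {
const loc = findLineColumn("hello\nworld\n", 7);
try std.testing.expectEqual(@as(usize, 1), loc.line);
try std.testing.expectEqual(@as(usize, 1), loc.column);
try std.testing.expectEqualStrings("world", loc.source_line);
}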
pub fn lineDelta(source: []const u8, start: usize, end: usize) isize {
var line: isize = 0;
if (end >= start) {
for (source[start..end]) |byte| switch (byte) {
'\n' => line += 1,
else => continue,
};
} else {
for (source[end..start]) |byte| switch (byte) {
'\n' => line -= 1,
else => continue,
};
}
return line;
}
pub const BinNameOptions = struct {
root_name: []const u8,
target: std.Target,
output_mode: std.builtin.OutputMode,
link_mode: ?std.builtin.LinkMode = null,
object_format: ?std.Target.ObjectFormat = null,
version: ?std.builtin.Version = null,
};
/// Returns the standard file system basename of a binary generated by the Zig compiler.
pub fn binNameAlloc(allocator: std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 {
const root_name = options.root_name;
const target = options.target;
const ofmt = options.object_format orelse target.getObjectFormat();
switch (ofmt) {
.coff => switch (options.output_mode) {
.Exe => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.exeFileExt() }),
.Lib => {
const suffix = switch (options.link_mode orelse .Static) {
.Static => ".lib",
.Dynamic => ".dll",
};
return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, suffix });
},
.Obj => return std.fmt.allocPrint(allocator, "{s}.obj", .{root_name}),
},
.elf => switch (options.output_mode) {
.Exe => return allocator.dupe(u8, root_name),
.Lib => {
switch (options.link_mode orelse .Static) {
.Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
target.libPrefix(), root_name,
}),
.Dynamic => {
if (options.version) |ver| {
return std.fmt.allocPrint(allocator, "{s}{s}.so.{d}.{d}.{d}", .{
target.libPrefix(), root_name, ver.major, ver.minor, ver.patch,
});
} else {
return std.fmt.allocPrint(allocator, "{s}{s}.so", .{
target.libPrefix(), root_name,
});
}
},
}
},
.Obj => return std.fmt.allocPrint(allocator, "{s}.o", .{root_name}),
},
.macho => switch (options.output_mode) {
.Exe => return allocator.dupe(u8, root_name),
.Lib => {
switch (options.link_mode orelse .Static) {
.Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
target.libPrefix(), root_name,
}),
.Dynamic => {
if (options.version) |ver| {
return std.fmt.allocPrint(allocator, "{s}{s}.{d}.{d}.{d}.dylib", .{
target.libPrefix(), root_name, ver.major, ver.minor, ver.patch,
});
} else {
return std.fmt.allocPrint(allocator, "{s}{s}.dylib", .{
target.libPrefix(), root_name,
});
}
},
}
},
.Obj => return std.fmt.allocPrint(allocator, "{s}.o", .{root_name}),
},
.wasm => switch (options.output_mode) {
.Exe => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.exeFileExt() }),
.Lib => {
switch (options.link_mode orelse .Static) {
.Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
target.libPrefix(), root_name,
}),
.Dynamic => return std.fmt.allocPrint(allocator, "{s}.wasm", .{root_name}),
}
},
.Obj => return std.fmt.allocPrint(allocator, "{s}.o", .{root_name}),
},
.c => return std.fmt.allocPrint(allocator, "{s}.c", .{root_name}),
.spirv => return std.fmt.allocPrint(allocator, "{s}.spv", .{root_name}),
.hex => return std.fmt.allocPrint(allocator, "{s}.ihex", .{root_name}),
.raw => return std.fmt.allocPrint(allocator, "{s}.bin", .{root_name}),
.plan9 => switch (options.output_mode) {
.Exe => return allocator.dupe(u8, root_name),
.Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, ofmt.fileExt(target.cpu.arch) }),
.Lib => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{ target.libPrefix(), root_name }),
},
.nvptx => return std.fmt.allocPrint(allocator, "{s}", .{root_name}),
}
}
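// For example, on a Linux-style ELF target (libPrefix "lib"), a dynamic
// library named "foo" with version 1.2.3 becomes "libfoo.so.1.2.3", while a
// COFF static library becomes "foo.lib".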
test {
@import("std").testing.refAllDecls(@This());
} | lib/std/zig.zig |
const uefi = @import("std").os.uefi;
const Guid = uefi.Guid;
pub const DevicePathProtocol = packed struct {
type: DevicePathType,
subtype: u8,
length: u16,
pub const guid align(8) = Guid{
.time_low = 0x09576e91,
.time_mid = 0x6d3f,
.time_high_and_version = 0x11d2,
.clock_seq_high_and_reserved = 0x8e,
.clock_seq_low = 0x39,
.node = [_]u8{ 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b },
};
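/// Casts this generic node to a typed device path based on its
/// (type, subtype) pair, returning null for unrecognized subtypes.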
pub fn getDevicePath(self: *const DevicePathProtocol) ?DevicePath {
return switch (self.type) {
.Hardware => blk: {
const hardware: ?HardwareDevicePath = switch (@intToEnum(HardwareDevicePath.Subtype, self.subtype)) {
.Pci => .{ .Pci = @ptrCast(*const HardwareDevicePath.PciDevicePath, self) },
.PcCard => .{ .PcCard = @ptrCast(*const HardwareDevicePath.PcCardDevicePath, self) },
.MemoryMapped => .{ .MemoryMapped = @ptrCast(*const HardwareDevicePath.MemoryMappedDevicePath, self) },
.Vendor => .{ .Vendor = @ptrCast(*const HardwareDevicePath.VendorDevicePath, self) },
.Controller => .{ .Controller = @ptrCast(*const HardwareDevicePath.ControllerDevicePath, self) },
.Bmc => .{ .Bmc = @ptrCast(*const HardwareDevicePath.BmcDevicePath, self) },
_ => null,
};
break :blk if (hardware) |h| .{ .Hardware = h } else null;
},
.Acpi => blk: {
const acpi: ?AcpiDevicePath = switch (@intToEnum(AcpiDevicePath.Subtype, self.subtype)) {
else => null, // TODO
};
break :blk if (acpi) |a| .{ .Acpi = a } else null;
},
.Messaging => blk: {
const messaging: ?MessagingDevicePath = switch (@intToEnum(MessagingDevicePath.Subtype, self.subtype)) {
else => null, // TODO
};
break :blk if (messaging) |m| .{ .Messaging = m } else null;
},
.Media => blk: {
const media: ?MediaDevicePath = switch (@intToEnum(MediaDevicePath.Subtype, self.subtype)) {
.HardDrive => .{ .HardDrive = @ptrCast(*const MediaDevicePath.HardDriveDevicePath, self) },
.Cdrom => .{ .Cdrom = @ptrCast(*const MediaDevicePath.CdromDevicePath, self) },
.Vendor => .{ .Vendor = @ptrCast(*const MediaDevicePath.VendorDevicePath, self) },
.FilePath => .{ .FilePath = @ptrCast(*const MediaDevicePath.FilePathDevicePath, self) },
.MediaProtocol => .{ .MediaProtocol = @ptrCast(*const MediaDevicePath.MediaProtocolDevicePath, self) },
.PiwgFirmwareFile => .{ .PiwgFirmwareFile = @ptrCast(*const MediaDevicePath.PiwgFirmwareFileDevicePath, self) },
.PiwgFirmwareVolume => .{ .PiwgFirmwareVolume = @ptrCast(*const MediaDevicePath.PiwgFirmwareVolumeDevicePath, self) },
.RelativeOffsetRange => .{ .RelativeOffsetRange = @ptrCast(*const MediaDevicePath.RelativeOffsetRangeDevicePath, self) },
.RamDisk => .{ .RamDisk = @ptrCast(*const MediaDevicePath.RamDiskDevicePath, self) },
_ => null,
};
break :blk if (media) |m| .{ .Media = m } else null;
},
.BiosBootSpecification => blk: {
const bbs: ?BiosBootSpecificationDevicePath = switch (@intToEnum(BiosBootSpecificationDevicePath.Subtype, self.subtype)) {
.BBS101 => .{ .BBS101 = @ptrCast(*const BiosBootSpecificationDevicePath.BBS101DevicePath, self) },
_ => null,
};
break :blk if (bbs) |b| .{ .BiosBootSpecification = b } else null;
},
.End => blk: {
const end: ?EndDevicePath = switch (@intToEnum(EndDevicePath.Subtype, self.subtype)) {
.EndEntire => .{ .EndEntire = @ptrCast(*const EndDevicePath.EndEntireDevicePath, self) },
.EndThisInstance => .{ .EndThisInstance = @ptrCast(*const EndDevicePath.EndThisInstanceDevicePath, self) },
_ => null,
};
break :blk if (end) |e| .{ .End = e } else null;
},
_ => null,
};
}
};
pub const DevicePath = union(DevicePathType) {
Hardware: HardwareDevicePath,
Acpi: AcpiDevicePath,
Messaging: MessagingDevicePath,
Media: MediaDevicePath,
BiosBootSpecification: BiosBootSpecificationDevicePath,
End: EndDevicePath,
};
pub const DevicePathType = extern enum(u8) {
Hardware = 0x01,
Acpi = 0x02,
Messaging = 0x03,
Media = 0x04,
BiosBootSpecification = 0x05,
End = 0x7f,
_,
};
pub const HardwareDevicePath = union(Subtype) {
Pci: *const PciDevicePath,
PcCard: *const PcCardDevicePath,
MemoryMapped: *const MemoryMappedDevicePath,
Vendor: *const VendorDevicePath,
Controller: *const ControllerDevicePath,
Bmc: *const BmcDevicePath,
pub const Subtype = extern enum(u8) {
Pci = 1,
PcCard = 2,
MemoryMapped = 3,
Vendor = 4,
Controller = 5,
Bmc = 6,
_,
};
pub const PciDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
// TODO
};
pub const PcCardDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
// TODO
};
pub const MemoryMappedDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
// TODO
};
pub const VendorDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
// TODO
};
pub const ControllerDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
// TODO
};
pub const BmcDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
// TODO
};
};
pub const AcpiDevicePath = union(Subtype) {
Acpi: void, // TODO
ExpandedAcpi: void, // TODO
Adr: void, // TODO
Nvdimm: void, // TODO
pub const Subtype = extern enum(u8) {
Acpi = 1,
ExpandedAcpi = 2,
Adr = 3,
Nvdimm = 4,
_,
};
};
pub const MessagingDevicePath = union(Subtype) {
Atapi: void, // TODO
Scsi: void, // TODO
FibreChannel: void, // TODO
FibreChannelEx: void, // TODO
@"1394": void, // TODO
Usb: void, // TODO
Sata: void, // TODO
UsbWwid: void, // TODO
Lun: void, // TODO
UsbClass: void, // TODO
I2o: void, // TODO
MacAddress: void, // TODO
Ipv4: void, // TODO
Ipv6: void, // TODO
Vlan: void, // TODO
InfiniBand: void, // TODO
Uart: void, // TODO
Vendor: void, // TODO
pub const Subtype = extern enum(u8) {
Atapi = 1,
Scsi = 2,
FibreChannel = 3,
FibreChannelEx = 21,
@"1394" = 4,
Usb = 5,
Sata = 18,
UsbWwid = 16,
Lun = 17,
UsbClass = 15,
I2o = 6,
MacAddress = 11,
Ipv4 = 12,
Ipv6 = 13,
Vlan = 20,
InfiniBand = 9,
Uart = 14,
Vendor = 10,
_,
};
};
pub const MediaDevicePath = union(Subtype) {
HardDrive: *const HardDriveDevicePath,
Cdrom: *const CdromDevicePath,
Vendor: *const VendorDevicePath,
FilePath: *const FilePathDevicePath,
MediaProtocol: *const MediaProtocolDevicePath,
PiwgFirmwareFile: *const PiwgFirmwareFileDevicePath,
PiwgFirmwareVolume: *const PiwgFirmwareVolumeDevicePath,
RelativeOffsetRange: *const RelativeOffsetRangeDevicePath,
RamDisk: *const RamDiskDevicePath,
pub const Subtype = extern enum(u8) {
HardDrive = 1,
Cdrom = 2,
Vendor = 3,
FilePath = 4,
MediaProtocol = 5,
PiwgFirmwareFile = 6,
PiwgFirmwareVolume = 7,
RelativeOffsetRange = 8,
RamDisk = 9,
_,
};
pub const HardDriveDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
// TODO
};
pub const CdromDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
// TODO
};
pub const VendorDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
// TODO
};
pub const FilePathDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
pub fn getPath(self: *const FilePathDevicePath) [*:0]const u16 {
return @ptrCast([*:0]const u16, @alignCast(2, @ptrCast([*]const u8, self)) + @sizeOf(FilePathDevicePath));
}
};
pub const MediaProtocolDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
// TODO
};
pub const PiwgFirmwareFileDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
};
pub const PiwgFirmwareVolumeDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
};
pub const RelativeOffsetRangeDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
reserved: u32,
start: u64,
end: u64,
};
pub const RamDiskDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
start: u64,
end: u64,
disk_type: uefi.Guid,
instance: u16,
};
};
pub const BiosBootSpecificationDevicePath = union(Subtype) {
BBS101: *const BBS101DevicePath,
pub const Subtype = extern enum(u8) {
BBS101 = 1,
_,
};
pub const BBS101DevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
device_type: u16,
status_flag: u16,
pub fn getDescription(self: *const BBS101DevicePath) [*:0]const u8 {
return @ptrCast([*:0]const u8, self) + @sizeOf(BBS101DevicePath);
}
};
};
pub const EndDevicePath = union(Subtype) {
EndEntire: *const EndEntireDevicePath,
EndThisInstance: *const EndThisInstanceDevicePath,
pub const Subtype = extern enum(u8) {
EndEntire = 0xff,
EndThisInstance = 0x01,
_,
};
pub const EndEntireDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
};
pub const EndThisInstanceDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
};
}; | lib/std/os/uefi/protocols/device_path_protocol.zig |
const std = @import("std");
const mem = std.mem;
const net = std.net;
const os = std.os;
const IO = @import("tigerbeetle-io").IO;
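// A minimal TCP echo server built on tigerbeetle-io's completion-based API:
// accept a connection, then loop recv -> send until a zero-byte read, which
// signals the peer closed the connection and triggers close + cleanup.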
const ClientHandler = struct {
io: *IO,
sock: os.socket_t,
recv_buf: []u8,
allocator: mem.Allocator,
completion: IO.Completion,
fn init(allocator: mem.Allocator, io: *IO, sock: os.socket_t) !*ClientHandler {
var buf = try allocator.alloc(u8, 1024);
var self = try allocator.create(ClientHandler);
self.* = ClientHandler{
.io = io,
.sock = sock,
.recv_buf = buf,
.allocator = allocator,
.completion = undefined,
};
return self;
}
fn deinit(self: *ClientHandler) !void {
self.allocator.free(self.recv_buf);
self.allocator.destroy(self);
}
fn start(self: *ClientHandler) !void {
self.recv();
}
fn recv(self: *ClientHandler) void {
self.io.recv(
*ClientHandler,
self,
recvCallback,
&self.completion,
self.sock,
self.recv_buf,
if (std.Target.current.os.tag == .linux) os.MSG_NOSIGNAL else 0,
);
}
fn recvCallback(
self: *ClientHandler,
completion: *IO.Completion,
result: IO.RecvError!usize,
) void {
const received = result catch @panic("recv error");
if (received == 0) {
self.io.close(
*ClientHandler,
self,
closeCallback,
completion,
self.sock,
);
return;
}
self.io.send(
*ClientHandler,
self,
sendCallback,
completion,
self.sock,
self.recv_buf[0..received],
if (std.Target.current.os.tag == .linux) os.MSG_NOSIGNAL else 0,
);
}
fn sendCallback(
self: *ClientHandler,
completion: *IO.Completion,
result: IO.SendError!usize,
) void {
_ = result catch @panic("send error");
self.recv();
}
fn closeCallback(
self: *ClientHandler,
completion: *IO.Completion,
result: IO.CloseError!void,
) void {
_ = result catch @panic("close error");
self.deinit() catch @panic("ClientHandler deinit error");
}
};
const Server = struct {
io: IO,
server: os.socket_t,
allocator: mem.Allocator,
fn init(allocator: mem.Allocator, address: std.net.Address) !Server {
const kernel_backlog = 1;
const server = try os.socket(address.any.family, os.SOCK_STREAM | os.SOCK_CLOEXEC, 0);
try os.setsockopt(
server,
os.SOL_SOCKET,
os.SO_REUSEADDR,
&std.mem.toBytes(@as(c_int, 1)),
);
try os.bind(server, &address.any, address.getOsSockLen());
try os.listen(server, kernel_backlog);
var self: Server = .{
.io = try IO.init(32, 0),
.server = server,
.allocator = allocator,
};
return self;
}
pub fn deinit(self: *Server) void {
os.close(self.server);
self.io.deinit();
}
pub fn run(self: *Server) !void {
var server_completion: IO.Completion = undefined;
self.io.accept(*Server, self, acceptCallback, &server_completion, self.server, 0);
while (true) try self.io.tick();
}
fn acceptCallback(
self: *Server,
completion: *IO.Completion,
result: IO.AcceptError!os.socket_t,
) void {
const accepted_sock = result catch @panic("accept error");
var handler = ClientHandler.init(self.allocator, &self.io, accepted_sock) catch @panic("handler create error");
handler.start() catch @panic("handler");
self.io.accept(*Server, self, acceptCallback, completion, self.server, 0);
}
};
pub fn main() anyerror!void {
const allocator = std.heap.page_allocator;
const address = try std.net.Address.parseIp4("127.0.0.1", 3131);
var server = try Server.init(allocator, address);
defer server.deinit();
try server.run();
} | examples/tcp_echo_server.zig |
const std = @import("std");
const mm = @import("root").mm;
const x86 = @import("../x86.zig");
usingnamespace @import("root").lib;
pub var logger = @TypeOf(x86.logger).childOf(@typeName(@This())){};
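/// Rounds n up to the next multiple of 8 (multiboot2 tags are 8-byte
/// aligned), e.g. alignTo8(1) == 8, alignTo8(8) == 8, alignTo8(9) == 16.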
fn alignTo8(n: usize) usize {
const mask = 0b111;
return (n - 1 | mask) + 1;
}
pub const Arch = enum(u32) {
I386 = 0,
MIPS = 4,
};
pub const TagType = enum(u32) {
End = 0,
Cmdline = 1,
BootLoaderName = 2,
Module = 3,
BasicMemInfo = 4,
BootDev = 5,
Mmap = 6,
VBE = 7,
Framebuffer = 8,
ElfSections = 9,
APM = 10,
EFI32 = 11,
EFI64 = 12,
SMBIOS = 13,
ACPIOld = 14,
ACPINew = 15,
Network = 16,
EFIMMap = 17,
EFIBootServices = 18,
EFI32ImageHandle = 19,
EFI64ImageHandle = 20,
LoadBaseAddr = 21,
_,
pub fn v(self: @This()) u32 {
return @enumToInt(self);
}
};
const MULTIBOOT2_MAGIC = 0x36d76289;
pub const Header = packed struct {
magic: u32 = MAGIC,
architecture: u32,
header_length: u32,
checksum: u32,
const MAGIC = 0xE85250D6;
pub const Tag = packed struct {
typ: u16,
flags: u16,
size: u32,
};
pub fn init(arch: Arch, length: u32) Header {
return .{
.architecture = @enumToInt(arch),
.header_length = length,
.checksum = 0 -% (MAGIC + length + @enumToInt(arch)),
};
}
pub const NilTag = packed struct {
const field_size = @sizeOf(Tag);
tag: Tag = .{
.typ = 0,
.flags = 0,
.size = 8,
},
// Force 8-aligned struct size for each tag
_pad: [alignTo8(field_size) - field_size]u8 = undefined,
};
pub fn InformationRequestTag(n: u32) type {
return packed struct {
const field_size = @sizeOf(Tag) + @sizeOf([n]u32);
tag: Tag = .{
.typ = 1,
.flags = 0,
.size = @sizeOf(u32) * n + 8,
},
mbi_tag_types: [n]u32,
_pad: [alignTo8(field_size) - field_size]u8 = undefined,
};
}
pub const FramebufferTag = packed struct {
const field_size = @sizeOf(Tag) + 3 * @sizeOf(u32);
tag: Tag = .{
.typ = 5,
.flags = 0,
.size = 20,
},
width: u32,
height: u32,
depth: u32,
_pad: [alignTo8(field_size) - field_size]u8 = undefined,
};
};
const info_request_tag = std.mem.toBytes(Header.InformationRequestTag(2){
.mbi_tag_types = [_]u32{ TagType.Cmdline.v(), TagType.Framebuffer.v() },
});
const framebuffer_tag = std.mem.toBytes(Header.FramebufferTag{
.width = 640,
.height = 480,
.depth = 8,
});
const nil_tag = std.mem.toBytes(Header.NilTag{});
const tag_buffer = info_request_tag ++ framebuffer_tag;
const total_size = @sizeOf(Header) + tag_buffer.len + nil_tag.len;
export const mbheader align(8) linksection(".multiboot") =
std.mem.toBytes(Header.init(Arch.I386, total_size)) ++ tag_buffer ++ nil_tag;
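// The header blob is the fixed Header, then the information-request and
// framebuffer tags, terminated by the nil (end) tag. It lives in the
// dedicated .multiboot section so a linker script can place it near the
// start of the image, as the multiboot2 spec requires.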
const BootInfoStart = packed struct {
total_size: u32,
reserved: u32,
};
const BootInfoHeader = packed struct {
typ: u32,
size: u32,
};
pub var mb_phys: ?mm.PhysicalAddress = null;
pub var loader_magic: u32 = undefined;
export fn multiboot_entry(info: u32, magic: u32) callconv(.C) noreturn {
mb_phys = mm.PhysicalAddress.new(info);
loader_magic = magic;
@call(.{ .stack = x86.stack[0..] }, x86.boot_entry, .{});
}
fn get_multiboot_tag(tag: TagType) ?[]u8 {
if (loader_magic != MULTIBOOT2_MAGIC) {
@panic("Not booted by multiboot2 compliant bootloader");
}
if (mb_phys) |phys| {
const mb = mm.directMapping().to_virt(phys);
const size = mb.into_pointer(*BootInfoStart).total_size;
// Skip BootInfoStart
var buffer = mb.into_pointer([*]u8)[@sizeOf(BootInfoStart)..size];
// Iterate over multiboot tags
var header: *BootInfoHeader = undefined;
while (buffer.len > @sizeOf(BootInfoHeader)) : (buffer = buffer[alignTo8(header.size)..]) {
const chunk = buffer[0..@sizeOf(BootInfoHeader)];
header = std.mem.bytesAsValue(BootInfoHeader, chunk);
const tagt = @intToEnum(TagType, header.typ);
if (tagt == tag) {
return buffer[0..header.size];
}
}
}
return null;
}
pub fn get_multiboot_memory() ?mm.PhysicalMemoryRange {
return detect_multiboot_memory();
}
pub fn get_cmdline() ?[]u8 {
var buf: []u8 = get_multiboot_tag(.Cmdline) orelse return null;
const CmdlineTag = packed struct {
type: u32,
size: u32,
};
buf = buf[@sizeOf(CmdlineTag)..];
return buf;
}
pub const MultibootFramebuffer = packed struct {
addr: u64,
pitch: u32,
width: u32,
height: u32,
bpp: u8,
type: u8,
reserved: u8,
};
pub fn get_framebuffer() ?*MultibootFramebuffer {
var buf: []u8 = get_multiboot_tag(.Framebuffer) orelse return null;
buf = buf[2 * @sizeOf(u32) ..];
return std.mem.bytesAsValue(
MultibootFramebuffer,
buf[0..@sizeOf(MultibootFramebuffer)],
);
}
fn detect_multiboot_memory() ?mm.PhysicalMemoryRange {
const MemoryMapTag = packed struct {
typ: u32,
size: u32,
entry_size: u32,
entry_version: u32,
};
var buf: []u8 = get_multiboot_tag(.Mmap) orelse return null;
const tag = std.mem.bytesAsValue(MemoryMapTag, buf[0..@sizeOf(MemoryMapTag)]);
buf = buf[@sizeOf(MemoryMapTag)..];
const MemEntry = packed struct {
base_addr: u64,
length: u64,
type: u32,
reserved: u32,
};
var best_slot: ?mm.PhysicalMemoryRange = null;
logger.log("BIOS memory map:\n", .{});
const entry_size = tag.entry_size;
while (buf.len >= entry_size) : (buf = buf[entry_size..]) {
const entry = std.mem.bytesAsValue(MemEntry, buf[0..@sizeOf(MemEntry)]);
const start = entry.base_addr;
const end = start + entry.length - 1;
const status = switch (entry.type) {
1 => "Available",
3 => "ACPI Mem",
4 => "Preserved on hibernation",
5 => "Defective",
else => "Reserved",
};
logger.log("[{x:0>10}-{x:0>10}] {s}\n", .{ start, end, status });
if (entry.type != 1) {
continue;
}
const this_slot = mm.PhysicalMemoryRange{
.base = mm.PhysicalAddress.new(start),
.size = entry.length,
};
if (best_slot) |slot| {
if (this_slot.size > slot.size) {
best_slot = this_slot;
}
} else {
best_slot = this_slot;
}
}
return best_slot;
} | kernel/arch/x86/multiboot.zig |
const std = @import("std");
const dtblib = @import("dtb");
pub const Error = dtblib.Error || error{UartNotFound};
pub const Uart = struct {
base: u64,
reg_shift: u4,
kind: UartKind,
};
pub const UartKind = enum {
ArmPl011, // "arm,pl011" -- QEMU ARM
SnpsDwApbUart, // "snps,dw-apb-uart" -- ROCKPro64
Ns16550a, // "ns16550a" -- QEMU RISC-V
SifiveUart0, // "sifive,uart0" -- Maixduino
};
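/// Walks the flattened device tree looking for a node named pl011@/serial@/
/// uart@ with a known "compatible" string, skipping nodes whose status is
/// not "okay". Returns the node's base address (decoded from its first "reg"
/// entry using the enclosing #address-cells), reg-shift, and driver kind.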
pub fn searchForUart(dtb: []const u8) Error!Uart {
var traverser: dtblib.Traverser = undefined;
try traverser.init(dtb);
var in_node = false;
var state: struct {
compatible: ?[]const u8 = null,
reg_shift: ?u4 = null,
reg: ?u64 = null,
} = undefined;
var address_cells: ?u32 = null;
var size_cells: ?u32 = null;
var ev = try traverser.next();
while (ev != .End) : (ev = try traverser.next()) {
if (!in_node) {
switch (ev) {
.BeginNode => |name| {
if (std.mem.startsWith(u8, name, "pl011@") or
std.mem.startsWith(u8, name, "serial@") or
std.mem.startsWith(u8, name, "uart@"))
{
in_node = true;
state = .{};
}
},
.Prop => |prop| {
if (std.mem.eql(u8, prop.name, "#address-cells") and address_cells == null) {
address_cells = readU32(prop.value);
} else if (std.mem.eql(u8, prop.name, "#size-cells") and size_cells == null) {
size_cells = readU32(prop.value);
}
},
else => {},
}
} else switch (ev) {
.Prop => |prop| {
if (std.mem.eql(u8, prop.name, "reg") and address_cells != null and size_cells != null) {
state.reg = try firstReg(address_cells.?, prop.value);
} else if (std.mem.eql(u8, prop.name, "status")) {
if (!std.mem.eql(u8, prop.value, "okay\x00")) {
in_node = false;
}
} else if (std.mem.eql(u8, prop.name, "compatible")) {
state.compatible = prop.value;
} else if (std.mem.eql(u8, prop.name, "reg-shift")) {
state.reg_shift = @truncate(u4, readU32(prop.value));
}
},
.BeginNode => in_node = false,
.EndNode => {
in_node = false;
const reg = state.reg orelse continue;
const compatible = state.compatible orelse continue;
const kind = if (std.mem.indexOf(u8, compatible, "arm,pl011\x00") != null)
UartKind.ArmPl011
else if (std.mem.indexOf(u8, compatible, "snps,dw-apb-uart\x00") != null)
UartKind.SnpsDwApbUart
else if (std.mem.indexOf(u8, compatible, "ns16550a\x00") != null)
UartKind.Ns16550a
else if (std.mem.indexOf(u8, compatible, "sifive,uart0\x00") != null)
UartKind.SifiveUart0
else
continue;
return Uart{
.base = reg,
.reg_shift = state.reg_shift orelse 0,
.kind = kind,
};
},
else => {},
}
}
return error.UartNotFound;
}
fn readU32(value: []const u8) u32 {
return std.mem.bigToNative(u32, @ptrCast(*const u32, @alignCast(@alignOf(u32), value.ptr)).*);
}
fn firstReg(address_cells: u32, value: []const u8) !u64 {
if (value.len % @sizeOf(u32) != 0) {
return error.BadStructure;
}
var big_endian_cells: []const u32 = @ptrCast([*]const u32, @alignCast(@alignOf(u32), value.ptr))[0 .. value.len / @sizeOf(u32)];
if (address_cells == 1) {
return std.mem.bigToNative(u32, big_endian_cells[0]);
} else if (address_cells == 2) {
return @as(u64, std.mem.bigToNative(u32, big_endian_cells[0])) << 32 | std.mem.bigToNative(u32, big_endian_cells[1]);
}
return error.UnsupportedCells;
} | common/ddtb.zig |
const std = @import("std");
const stdx = @import("stdx");
const graphics = @import("graphics");
const Color = graphics.Color;
const ui = @import("../ui.zig");
const ScrollView = ui.widgets.ScrollView;
const NullId = std.math.maxInt(u32);
const log = stdx.log.scoped(.list);
pub const ScrollList = struct {
props: struct {
children: ui.FrameListPtr = ui.FrameListPtr.init(0, 0),
bg_color: Color = Color.White,
},
list: ui.WidgetRef(List),
const Self = @This();
pub fn build(self: *Self, c: *ui.BuildContext) ui.FrameId {
return c.decl(ScrollView, .{
.enable_hscroll = false,
.bg_color = self.props.bg_color,
.child = c.decl(List, .{
.bind = &self.list,
.bg_color = self.props.bg_color,
.children = self.props.children,
}),
});
}
/// Returns the index of the selected item, or NullId if there is no selection.
pub fn getSelectedIdx(self: *Self) u32 {
return self.list.getWidget().selected_idx;
}
};
pub const List = struct {
props: struct {
children: ui.FrameListPtr = ui.FrameListPtr.init(0, 0),
bg_color: Color = Color.White,
},
selected_idx: u32,
const Self = @This();
pub fn init(self: *Self, c: *ui.InitContext) void {
self.selected_idx = NullId;
c.addMouseDownHandler(c.node, handleMouseDownEvent);
c.addKeyDownHandler(self, onKeyDown);
}
pub fn build(self: *Self, c: *ui.BuildContext) ui.FrameId {
return c.fragment(self.props.children);
}
fn onBlur(node: *ui.Node, ctx: *ui.CommonContext) void {
_ = ctx;
_ = node;
}
fn onKeyDown(self: *Self, e: ui.KeyDownEvent) void {
const ke = e.val;
switch (ke.code) {
.ArrowDown => {
if (self.props.children.len == 0) return;
// Start from the first item when nothing is selected; otherwise
// advance, clamping to the last item. Guarding against NullId
// avoids overflowing the u32 sentinel.
if (self.selected_idx == NullId) {
self.selected_idx = 0;
} else if (self.selected_idx + 1 < self.props.children.len) {
self.selected_idx += 1;
}
},
.ArrowUp => {
// Only move up when there is a selection above index 0.
if (self.selected_idx != NullId and self.selected_idx > 0) {
self.selected_idx -= 1;
}
},
else => {},
}
}
fn handleMouseDownEvent(node: *ui.Node, e: ui.MouseDownEvent) ui.EventResult {
var self = node.getWidget(Self);
if (e.val.button == .Left) {
e.ctx.requestFocus(onBlur);
const xf = @intToFloat(f32, e.val.x);
const yf = @intToFloat(f32, e.val.y);
if (xf >= node.abs_pos.x and xf <= node.abs_pos.x + node.layout.width) {
var i: u32 = 0;
while (i < node.children.items.len) : (i += 1) {
const child = node.children.items[i];
if (yf < child.abs_pos.y) {
break;
}
if (yf >= child.abs_pos.y and yf <= child.abs_pos.y + child.layout.height) {
self.selected_idx = i;
break;
}
}
}
}
return .Continue;
}
pub fn postPropsUpdate(self: *Self) void {
if (self.selected_idx != NullId) {
if (self.selected_idx >= self.props.children.len) {
if (self.props.children.len == 0) {
self.selected_idx = NullId;
} else {
self.selected_idx = self.props.children.len - 1;
}
}
}
}
pub fn layout(self: *Self, c: *ui.LayoutContext) ui.LayoutSize {
_ = self;
const node = c.getNode();
const cstr = c.getSizeConstraint();
var vacant_size = cstr;
var max_width: f32 = 0;
var cur_y: f32 = 0;
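        // Stack children vertically: each child is offered the remaining vacant
        // height, placed at the running y offset, and the widest child sets the width.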
for (node.children.items) |child| {
const child_size = c.computeLayout(child, vacant_size);
c.setLayout(child, ui.Layout.init(0, cur_y, child_size.width, child_size.height));
vacant_size.height -= child_size.height;
cur_y += child_size.height;
if (child_size.width > max_width) {
max_width = child_size.width;
}
}
var res = ui.LayoutSize.init(max_width, cur_y);
if (c.prefer_exact_width) {
res.width = cstr.width;
}
return res;
}
pub fn renderCustom(self: *Self, c: *ui.RenderContext) void {
const g = c.g;
const alo = c.getAbsLayout();
const node = c.node;
g.setFillColor(self.props.bg_color);
g.fillRect(alo.x, alo.y, alo.width, alo.height);
c.renderChildren();
if (self.selected_idx != NullId) {
// Highlight selected item.
g.setStrokeColor(Color.Blue);
g.setLineWidth(2);
const child = node.children.items[self.selected_idx];
g.drawRect(child.abs_pos.x, child.abs_pos.y, alo.width, child.layout.height);
}
}
}; | ui/src/widgets/list.zig |
const std = @import("std.zig");
const builtin = @import("builtin");
const testing = std.testing;
const ResetEvent = std.ResetEvent;
/// Lock may be held only once. If the same thread
/// tries to acquire the same mutex twice, it deadlocks.
/// This type supports static initialization and is based on WebKit's WTF Lock (via Rust's parking_lot):
/// https://github.com/Amanieu/parking_lot/blob/master/core/src/word_lock.rs
/// When an application is built in single-threaded release mode, all the functions are
/// no-ops. In single-threaded debug mode, there is deadlock detection.
pub const Mutex = if (builtin.single_threaded)
struct {
lock: @TypeOf(lock_init),
const lock_init = if (std.debug.runtime_safety) false else {};
pub const Held = struct {
mutex: *Mutex,
pub fn release(self: Held) void {
if (std.debug.runtime_safety) {
self.mutex.lock = false;
}
}
};
pub fn init() Mutex {
return Mutex{ .lock = lock_init };
}
pub fn deinit(self: *Mutex) void {}
pub fn acquire(self: *Mutex) Held {
if (std.debug.runtime_safety and self.lock) {
@panic("deadlock detected");
}
return Held{ .mutex = self };
}
}
else
struct {
state: usize,
const MUTEX_LOCK: usize = 1 << 0;
const QUEUE_LOCK: usize = 1 << 1;
const QUEUE_MASK: usize = ~(MUTEX_LOCK | QUEUE_LOCK);
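        // State word layout:
        //   bit 0 (MUTEX_LOCK): the mutex is held
        //   bit 1 (QUEUE_LOCK): a thread is manipulating the wait queue
        //   remaining bits (QUEUE_MASK): pointer to the head QueueNode, whose
        //   alignment keeps its low two bits zero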
const QueueNode = std.atomic.Stack(ResetEvent).Node;
/// number of iterations to spin yielding the cpu
const SPIN_CPU = 4;
/// number of iterations to spin in the cpu yield loop
const SPIN_CPU_COUNT = 30;
/// number of iterations to spin yielding the thread
const SPIN_THREAD = 1;
pub fn init() Mutex {
return Mutex{ .state = 0 };
}
pub fn deinit(self: *Mutex) void {
self.* = undefined;
}
pub const Held = struct {
mutex: *Mutex,
pub fn release(self: Held) void {
// since MUTEX_LOCK is the first bit, we can use (.Sub) instead of (.And, ~MUTEX_LOCK).
// this is because .Sub may be implemented more efficiently than the latter
// (e.g. `lock xadd` vs `cmpxchg` loop on x86)
const state = @atomicRmw(usize, &self.mutex.state, .Sub, MUTEX_LOCK, .Release);
if ((state & QUEUE_MASK) != 0 and (state & QUEUE_LOCK) == 0) {
self.mutex.releaseSlow(state);
}
}
};
pub fn acquire(self: *Mutex) Held {
// fast path close to SpinLock fast path
if (@cmpxchgWeak(usize, &self.state, 0, MUTEX_LOCK, .Acquire, .Monotonic)) |current_state| {
self.acquireSlow(current_state);
}
return Held{ .mutex = self };
}
fn acquireSlow(self: *Mutex, current_state: usize) void {
var spin: usize = 0;
var state = current_state;
while (true) {
// try and acquire the lock if unlocked
if ((state & MUTEX_LOCK) == 0) {
state = @cmpxchgWeak(usize, &self.state, state, state | MUTEX_LOCK, .Acquire, .Monotonic) orelse return;
continue;
}
// spin only if the waiting queue isn't empty and when it hasn't spun too much already
if ((state & QUEUE_MASK) == 0 and spin < SPIN_CPU + SPIN_THREAD) {
if (spin < SPIN_CPU) {
std.SpinLock.yield(SPIN_CPU_COUNT);
} else {
std.os.sched_yield() catch std.time.sleep(0);
}
state = @atomicLoad(usize, &self.state, .Monotonic);
continue;
}
// thread should block, try and add this event to the waiting queue
var node = QueueNode{
.next = @intToPtr(?*QueueNode, state & QUEUE_MASK),
.data = ResetEvent.init(),
};
defer node.data.deinit();
const new_state = @ptrToInt(&node) | (state & ~QUEUE_MASK);
state = @cmpxchgWeak(usize, &self.state, state, new_state, .Release, .Monotonic) orelse {
// node is in the queue, wait until a `held.release()` wakes us up.
_ = node.data.wait(null) catch unreachable;
spin = 0;
state = @atomicLoad(usize, &self.state, .Monotonic);
continue;
};
}
}
fn releaseSlow(self: *Mutex, current_state: usize) void {
// grab the QUEUE_LOCK in order to signal a waiting queue node's event.
var state = current_state;
while (true) {
if ((state & QUEUE_LOCK) != 0 or (state & QUEUE_MASK) == 0)
return;
state = @cmpxchgWeak(usize, &self.state, state, state | QUEUE_LOCK, .Acquire, .Monotonic) orelse break;
}
while (true) {
// barrier needed to observe incoming state changes
defer @fence(.Acquire);
// the mutex is currently locked. try to unset the QUEUE_LOCK and let the locker wake up the next node.
// avoids waking up multiple sleeping threads which try to acquire the lock again which increases contention.
if ((state & MUTEX_LOCK) != 0) {
state = @cmpxchgWeak(usize, &self.state, state, state & ~QUEUE_LOCK, .Release, .Monotonic) orelse return;
continue;
}
// try to pop the top node on the waiting queue stack to wake it up
// while at the same time unsetting the QUEUE_LOCK.
const node = @intToPtr(*QueueNode, state & QUEUE_MASK);
const new_state = @ptrToInt(node.next) | (state & MUTEX_LOCK);
state = @cmpxchgWeak(usize, &self.state, state, new_state, .Release, .Monotonic) orelse {
_ = node.data.set(false);
return;
};
}
}
};
const TestContext = struct {
mutex: *Mutex,
data: i128,
const incr_count = 10000;
};
test "std.Mutex" {
var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
defer std.heap.page_allocator.free(plenty_of_memory);
var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
var a = &fixed_buffer_allocator.allocator;
var mutex = Mutex.init();
defer mutex.deinit();
var context = TestContext{
.mutex = &mutex,
.data = 0,
};
if (builtin.single_threaded) {
worker(&context);
testing.expect(context.data == TestContext.incr_count);
} else {
const thread_count = 10;
var threads: [thread_count]*std.Thread = undefined;
for (threads) |*t| {
t.* = try std.Thread.spawn(&context, worker);
}
for (threads) |t|
t.wait();
testing.expect(context.data == thread_count * TestContext.incr_count);
}
}
fn worker(ctx: *TestContext) void {
var i: usize = 0;
while (i != TestContext.incr_count) : (i += 1) {
const held = ctx.mutex.acquire();
defer held.release();
ctx.data += 1;
}
} | lib/std/mutex.zig |
const std = @import("std");
const math = @import("../pkg/zlm.zig");
// @TODO: More settings to streamline spatial hash usage for other purposes. Maybe even
// make it so you can provide your own coordinate type and functions?
pub const SpatialHashSettings = struct {
/// The height and width of each bucket inside the hash.
bucketSize: f32 = 256,
};
pub fn Generate(comptime T: type, comptime spatialSettings: SpatialHashSettings) type {
// const VisType: type = if (spatialSettings.visualizable) SpatialVisualization else void;
return struct {
const context = struct {
pub fn hash(self: @This(), value: math.Vec2) u64 {
_ = self;
return std.hash.Wyhash.hash(438193475, &std.mem.toBytes(value));
}
pub fn eql(self: @This(), lhs: math.Vec2, rhs: math.Vec2) bool {
_ = self;
return lhs.x == rhs.x and lhs.y == rhs.y;
}
};
const Self = @This();
/// Some basic settings about the spatial hash, as given at type generation.
pub const settings = spatialSettings;
/// This is the inverse of the bucket size, the formula <floor(n*cellInverse)/cellInverse> will
/// result in the 'hash' that locates the buckets in this spatial hash.
pub const cellInverse: f32 = 1.0 / spatialSettings.bucketSize;
/// A Bucket contains all the targets inside of an imaginary cell generated by the spatial hash.
pub const Bucket = std.AutoArrayHashMap(T, void);
        /// The HashType maps a bucket's origin coordinate (Vec2) to its Bucket.
pub const HashType = std.HashMap(math.Vec2, Bucket, context, 80);
allocator: std.mem.Allocator,
/// A HashMap of (Vec2 -> Bucket) to contain all the buckets as new ones appear.
hashBins: HashType,
        /// Temporary holding bucket for the targets gathered by a query. It is reused
        /// by every query, so mutating the spatial hash or starting a new query
        /// invalidates any slice previously returned.
holding: Bucket,
/// Creates a spatial hash instance and allocates memory for the bucket structures.
pub fn init(allocator: std.mem.Allocator) Self {
return .{
.allocator = allocator,
.hashBins = HashType.init(allocator),
.holding = Bucket.init(allocator),
// .visualization = if (spatialSettings.visualizable) .{} else {},
};
}
/// Deallocates all the memory associated with this spatial hash. Note if T is not a pointer,
/// then this will result in the loss of data.
pub fn deinit(self: *Self) void {
var iterator = self.hashBins.iterator();
while (iterator.next()) |bin| {
bin.value_ptr.deinit();
}
self.holding.deinit();
self.hashBins.deinit();
}
// === ADDS ===
/// Adds the target to the spatial hash, into every bucket that it spans.
pub fn addAABB(self: *Self, target: T, position: math.Vec2, size: math.Vec2) void {
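            // Walk bucket centers (bucket origin + half a bucket) from the bucket
            // containing `position` to the one containing `position + size`.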
var start = vecToIndex(position).add(.{ .x = settings.bucketSize * 0.5, .y = settings.bucketSize * 0.5 });
var stop = vecToIndex(position.add(size)).add(.{ .x = settings.bucketSize * 0.5, .y = settings.bucketSize * 0.5 });
var current = start;
while (current.x <= stop.x) {
while (current.y <= stop.y) {
var bin = self.getBin(current);
bin.put(target, {}) catch unreachable;
current.y += settings.bucketSize;
}
current.y = start.y;
current.x += settings.bucketSize;
}
}
/// Adds the target to the spatial hash, into one single bucket.
pub fn addPoint(self: *Self, target: T, position: math.Vec2) void {
var result = self.getBin(position);
result.put(target, {}) catch unreachable;
}
// === REMOVALS ===
/// Removes the target from the spatial hash buckets that it spans. Make sure to provide
/// the same coordinates that it was added with.
pub fn removeAABB(self: *Self, target: T, position: math.Vec2, size: math.Vec2) void {
const stop = position.add(size);
var current = position;
while (current.x <= stop.x) : (current.x += settings.bucketSize) {
while (current.y <= stop.y) : (current.y += settings.bucketSize) {
var bin = self.getBin(current);
_ = bin.swapRemove(target);
}
}
}
/// Removes the target from the spatial hash's singular bucket. Make sure to provide
/// the same coordinate that it was added with.
pub fn removePoint(self: *Self, target: T, position: math.Vec2) void {
const result = self.getBin(position);
_ = result.swapRemove(target);
}
// === QUERIES ===
/// Returns an array of each T inside of the given rectangle.
/// Note that broad phase physics like this is not accurate, instead this opts to return in general
/// what *could* be a possible collision.
pub fn queryAABB(self: *Self, position: math.Vec2, size: math.Vec2) []T {
self.holding.unmanaged.clearRetainingCapacity();
var start = vecToIndex(position).add(.{ .x = settings.bucketSize * 0.5, .y = settings.bucketSize * 0.5 });
var stop = vecToIndex(position.add(size)).add(.{ .x = settings.bucketSize * 0.5, .y = settings.bucketSize * 0.5 });
var current = start;
while (current.x <= stop.x) {
while (current.y <= stop.y) {
var bin = self.getBin(current);
for (bin.keys()) |value| {
self.holding.put(value, {}) catch unreachable;
}
current.y += settings.bucketSize;
}
current.y = start.y;
current.x += settings.bucketSize;
}
return self.holding.keys();
}
/// Returns an array of each T inside of the given point's bucket.
/// Note that broad phase physics like this is not accurate, instead this opts to return in general
/// what *could* be a possible collision.
pub fn queryPoint(self: *Self, point: math.Vec2) []T {
self.holding.unmanaged.clearRetainingCapacity();
const bin = self.getBin(point);
for (bin.keys()) |value| {
self.holding.put(value, {}) catch unreachable;
}
return self.holding.keys();
}
inline fn queryLineLow(self: *Self, queryStart: math.Vec2, queryEnd: math.Vec2) void {
var delta = queryEnd.sub(queryStart);
var yi = settings.bucketSize;
var current = queryStart;
if (delta.y < 0) {
yi = -settings.bucketSize;
delta.y = -delta.y;
}
var D = (2 * delta.y) - delta.x;
while (current.x < queryEnd.x) {
// Plot:
var bin = self.getBin(current);
for (bin.keys()) |value| {
self.holding.put(value, {}) catch unreachable;
}
if (D > 0) {
current.y = current.y + yi;
D = D + (2 * (delta.y - delta.x));
} else {
D = D + 2 * delta.y;
}
current.x += settings.bucketSize;
}
}
inline fn queryLineHigh(self: *Self, queryStart: math.Vec2, queryEnd: math.Vec2) void {
var delta = queryEnd.sub(queryStart);
var xi = settings.bucketSize;
var current = queryStart;
if (delta.x < 0) {
xi = -settings.bucketSize;
delta.x = -delta.x;
}
var D = (2 * delta.x) - delta.y;
while (current.y < queryEnd.y) {
// Plot:
var bin = self.getBin(current);
for (bin.keys()) |value| {
self.holding.put(value, {}) catch unreachable;
}
if (D > 0) {
current.x = current.x + xi;
D = D + (2 * (delta.x - delta.y));
} else {
D = D + 2 * delta.x;
}
current.y += settings.bucketSize;
}
}
/// Returns an array of each T inside every bucket along this line's path.
/// Note that broad phase physics like this is not accurate, instead this opts to return in general
/// what *could* be a possible collision.
pub fn queryLine(self: *Self, queryStart: math.Vec2, queryEnd: math.Vec2) []T {
self.holding.unmanaged.clearRetainingCapacity();
// Had some edge issues with some quadrants not including start/end.
{
const bin = self.getBin(queryStart);
for (bin.keys()) |value| {
self.holding.put(value, {}) catch unreachable;
}
}
{
const bin = self.getBin(queryEnd);
for (bin.keys()) |value| {
self.holding.put(value, {}) catch unreachable;
}
}
if (std.math.fabs(queryEnd.y - queryStart.y) < std.math.fabs(queryEnd.x - queryStart.x)) {
if (queryStart.x > queryEnd.x) {
self.queryLineLow(queryEnd, queryStart);
} else {
self.queryLineLow(queryStart, queryEnd);
}
} else {
if (queryStart.y > queryEnd.y) {
self.queryLineHigh(queryEnd, queryStart);
} else {
self.queryLineHigh(queryStart, queryEnd);
}
}
return self.holding.keys();
}
inline fn getBin(self: *Self, position: math.Vec2) *Bucket {
const hash = vecToIndex(position);
const result = self.hashBins.getOrPut(hash) catch unreachable;
if (!result.found_existing) {
result.value_ptr.* = Bucket.init(self.allocator);
}
return result.value_ptr;
}
inline fn vecToIndex(vec: math.Vec2) math.Vec2 {
return .{ .x = floatToIndex(vec.x), .y = floatToIndex(vec.y) };
}
inline fn floatToIndex(float: f32) f32 {
return (std.math.floor(float * cellInverse)) / cellInverse;
}
};
}
test "speed testing spatial hash" {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
std.debug.print("\n> Spatial hash Speedtest with GPA Allocator:\n", .{});
    var hash = Generate(usize, .{ .bucketSize = 50 }).init(gpa.allocator());
defer hash.deinit();
    var prng = std.rand.DefaultPrng.init(3741837483);
    var rand = prng.random();
var clock = std.time.Timer.start() catch unreachable;
_ = clock.lap();
var i: usize = 0;
while (i < 10000) : (i += 1) {
var randX = rand.float(f32) * 200;
var randY = rand.float(f32) * 200;
hash.addPoint(i, math.vec2(randX, randY));
}
var time = clock.lap();
std.debug.print(">> Took {d:.2}ms to create 10,000 points on a hash of usize.\n", .{@intToFloat(f64, time) / 1000000.0});
while (i < 20000) : (i += 1) {
var randX = rand.float(f32) * 200;
var randY = rand.float(f32) * 200;
hash.addPoint(i, math.vec2(randX, randY));
}
time = clock.lap();
std.debug.print(">> Took {d:.2}ms to create 10,000 more points on a hash of usize.\n", .{@intToFloat(f64, time) / 1000000.0});
i = 0;
var visited: i32 = 0;
while (i < 200) : (i += 1) {
for (hash.queryPoint(.{ .x = rand.float(f32) * 200, .y = rand.float(f32) * 200 })) |_| {
visited += 1;
}
}
time = clock.lap();
std.debug.print(">> Took {d:.2}ms to point iterate over a bucket 200 times, and visited {any} items.\n", .{ @intToFloat(f64, time) / 1000000.0, visited });
}
test "spatial point insertion/remove/query" {
const assert = @import("std").debug.assert;
var hash = Generate(i32, .{ .bucketSize = 64 }).init(std.testing.allocator);
defer hash.deinit();
hash.addPoint(40, .{ .x = 20, .y = 20 });
hash.addPoint(80, .{ .x = 100, .y = 100 });
{
var data = hash.queryPoint(.{ .x = 10, .y = 10 });
assert(data.len == 1);
assert(data[0] == 40);
}
{
hash.addPoint(100, .{ .x = 40, .y = 40 });
var data = hash.queryPoint(.{ .x = 10, .y = 10 });
assert(data[0] == 40);
assert(data[1] == 100);
assert(data.len == 2);
}
{
hash.removePoint(100, .{ .x = 40, .y = 40 });
var data = hash.queryPoint(.{ .x = 10, .y = 10 });
assert(data[0] == 40);
assert(data.len == 1);
}
}
test "spatial rect insertion/remove/query" {
const assert = @import("std").debug.assert;
var hash = Generate(i32, .{ .bucketSize = 100 }).init(std.testing.allocator);
defer hash.deinit();
hash.addAABB(1, math.vec2(50, 50), math.vec2(100, 100));
{
var data = hash.queryAABB(math.vec2(0, 0), math.vec2(150, 150));
assert(data.len == 1);
}
hash.addAABB(2, math.vec2(150, 150), math.vec2(100, 100));
{
var data = hash.queryAABB(math.vec2(0, 0), math.vec2(100, 100));
assert(data.len == 2);
}
hash.removeAABB(2, math.vec2(150, 150), math.vec2(100, 100));
{
var data = hash.queryAABB(math.vec2(0, 0), math.vec2(100, 100));
assert(data.len == 1);
}
}
test "spatial line query" {
const assert = @import("std").debug.assert;
var hash = Generate(i32, .{ .bucketSize = 100 }).init(std.testing.allocator);
defer hash.deinit();
// formation like
// * * *
//
//
// * *
//
//
//
//
//
//
// *
hash.addPoint(1, math.vec2(20, 20));
hash.addPoint(2, math.vec2(350, 350));
hash.addPoint(3, math.vec2(350, 20));
hash.addPoint(4, math.vec2(20, 350));
hash.addPoint(5, math.vec2(20, 3500));
hash.addPoint(6, math.vec2(3500, 20));
{
// horizontal, should have 2.
var data = hash.queryLine(math.vec2(20, 20), math.vec2(520, 20));
assert(data.len == 2);
// diagonal, should have 2.
data = hash.queryLine(math.vec2(0, 0), math.vec2(400, 400));
assert(data.len == 2);
// Reverse diagonal, should have 2.
data = hash.queryLine(math.vec2(400, 400), math.vec2(0, 0));
assert(data.len == 2);
// vertical, also 2.
data = hash.queryLine(math.vec2(20, 20), math.vec2(20, 520));
assert(data.len == 2);
}
} | src/zt/spatialHash.zig |
const std = @import("std");
const zm = @import("zmath");
const pow = std.math.pow;
const PI = std.math.pi;
const print = std.io.getStdOut().writer().print;
const printErr = std.io.getStdErr().writer().print;
const Vector = std.meta.Vector;
const Random = std.rand.Random;
const DefaultRandom = std.rand.DefaultPrng;
const ArrayList = std.ArrayList;
const Thread = std.Thread;
const OS = std.os;
const Mutex = Thread.Mutex;
const Sphere = @import("hittables.zig").Sphere;
const Ray = @import("ray.zig").Ray;
const Hit = @import("ray.zig").Hit;
const Material = @import("materials.zig").Material;
const LambertianMat = @import("materials.zig").LambertianMat;
const MetalMat = @import("materials.zig").MetalMat;
const DielectricMat = @import("materials.zig").DielectricMat;
const Pixel = struct {
r: u8,
g: u8,
b: u8,
};
fn outputPPMHeader(size: Vector(2, u32)) anyerror!void {
try print("P3\n", .{});
try print("{} {}\n", .{ size[0], size[1] });
try print("{}\n", .{255});
}
fn outputPixels(size: Vector(2, u32), pixels: []Pixel) anyerror!void {
var x: usize = 0;
var y: usize = size[1];
while (y > 0) {
y -= 1;
x = 0;
while (x < size[0]) : (x += 1) {
var index = y * size[0] + x;
            try print("{} {} {}\n", .{ pixels[index].r, pixels[index].g, pixels[index].b });
}
}
}
fn background(r: Ray) Vector(3, f32) {
var y = zm.normalize3(r.dir)[1];
// -1; 1 -> 0; 1
y = (y + 1.0) * 0.5;
var percentage = 0.2 + y * 0.8;
const white = Vector(3, f32){ 1.0, 1.0, 1.0 };
const blue = Vector(3, f32){ 0.5, 0.7, 1.0 };
return zm.lerp(white, blue, percentage);
}
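/// Recursively traces a ray through the scene: the nearest sphere hit scatters
/// the ray according to its material; rays that hit nothing return the sky
/// gradient, and the recursion stops once the bounce budget is spent.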
fn traceRay(ray: Ray, spheres: []Sphere, remainingBounces: u32, rng: Random) Vector(3, f32) {
if (remainingBounces <= 0) {
return Vector(3, f32){ 0.0, 0.0, 0.0 };
}
var nearestHit: ?Hit = null;
var hitMaterial: ?*const Material = null;
for (spheres) |sphere| {
var maxDistance: f32 = 1000000.0;
if (nearestHit) |hit| {
maxDistance = hit.rayFactor;
}
var maybeHit = sphere.hittable.testHit(ray, 0.001, maxDistance);
if (maybeHit) |hit| {
nearestHit = hit;
hitMaterial = sphere.material;
}
}
if (nearestHit) |hit| {
var scatteredRay = hitMaterial.?.scatter(&hit, ray, rng);
return scatteredRay.attenuation * traceRay(scatteredRay.ray, spheres, remainingBounces - 1, rng);
} else {
return background(ray);
}
}
const Camera = struct {
origin: Vector(4, f32),
right: Vector(4, f32),
up: Vector(4, f32),
focusPlaneLowerLeft: Vector(4, f32),
lensRadius: f32,
pub fn init(pos: Vector(4, f32), lookAt: Vector(4, f32), requestedUp: Vector(4, f32), vfov: f32, aspectRatio: f32, aperture: f32, focusDist: f32) Camera {
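        // h = tan(vfov / 2): half the viewport height at unit distance from the camera.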
const h = @sin(vfov / 2.0) / @cos(vfov / 2.0);
const viewportHeight = 2.0 * h;
const viewportSize = Vector(2, f32){ viewportHeight * aspectRatio, viewportHeight };
var forward = zm.normalize3(lookAt - pos);
var right = zm.normalize3(zm.cross3(forward, requestedUp));
var up = zm.cross3(right, forward);
right = @splat(4, viewportSize[0] * focusDist) * right;
up = @splat(4, viewportHeight * focusDist) * up;
const focusPlaneLowerLeft = pos - right * zm.f32x4s(0.5) - up * zm.f32x4s(0.5) + @splat(4, focusDist) * forward;
return Camera{ .origin = pos, .right = right, .up = up, .focusPlaneLowerLeft = focusPlaneLowerLeft, .lensRadius = aperture / 2.0 };
}
pub fn generateRay(self: Camera, u: f32, v: f32, rng: Random) Ray {
const onLenseOffset = zm.normalize3(self.up) * @splat(4, self.lensRadius * rng.float(f32)) + zm.normalize3(self.right) * @splat(4, self.lensRadius * rng.float(f32));
const offsetOrigin = self.origin + onLenseOffset;
const dir = self.focusPlaneLowerLeft + @splat(4, u) * self.right + @splat(4, v) * self.up - offsetOrigin;
return Ray{ .origin = offsetOrigin, .dir = zm.normalize3(dir) };
}
};
const RenderThreadCtx = struct {
id: u32,
chunks: []Chunk,
rng: Random,
camera: *Camera,
spheres: []Sphere,
pixels: []Vector(3, f32),
size: Vector(2, u32),
spp: u32,
gamma: f32,
maxBounces: u32,
};
const Chunk = struct {
chunkTopRightPixelIndices: Vector(2, u32),
chunkSize: Vector(2, u32),
processingLock: Mutex,
processed: bool,
pub fn init(topRightPixelIndices: Vector(2, u32), chunkSize: Vector(2, u32)) Chunk {
return Chunk{ .chunkTopRightPixelIndices = topRightPixelIndices, .chunkSize = chunkSize, .processingLock = Mutex{}, .processed = false };
}
pub fn render(self: *Chunk, ctx: *const RenderThreadCtx) void {
var yOffset: usize = 0;
while (yOffset < self.chunkSize[1]) : (yOffset += 1) {
const y = self.chunkTopRightPixelIndices[1] + yOffset;
var xOffset: usize = 0;
while (xOffset < self.chunkSize[0]) : (xOffset += 1) {
const x = self.chunkTopRightPixelIndices[0] + xOffset;
var color = Vector(3, f32){ 0.0, 0.0, 0.0 };
var sample: u32 = 0;
while (sample < ctx.spp) : (sample += 1) {
var u = (@intToFloat(f32, x) + ctx.rng.float(f32)) / @intToFloat(f32, ctx.size[0]);
var v = (@intToFloat(f32, y) + ctx.rng.float(f32)) / @intToFloat(f32, ctx.size[1]);
var ray = ctx.camera.generateRay(u, v, ctx.rng);
color += traceRay(ray, ctx.spheres, ctx.maxBounces, ctx.rng);
}
ctx.pixels[y * ctx.size[0] + x] += color;
}
}
self.processed = true;
}
};
fn renderThreadFn(ctx: *RenderThreadCtx) void {
var areUnprocessedChunks = true;
while (areUnprocessedChunks) {
areUnprocessedChunks = false;
for (ctx.chunks) |*chunk| {
if (!chunk.processed and chunk.processingLock.tryLock()) {
printErr("Rendering (thread_{}): {}\n", .{ ctx.id, chunk.chunkTopRightPixelIndices }) catch {};
chunk.render(ctx);
chunk.processingLock.unlock();
areUnprocessedChunks = true;
}
}
}
}
pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
const aspectRatio = 16.0 / 9.0;
const width = 768;
//const width = 1920;
//const width = 2560;
//const width = 3840;
const size = Vector(2, u32){ width, width / aspectRatio };
const pixelCount = size[0] * size[1];
const spp = 32;
const maxBounces = 16;
const gamma = 2.2;
const cameraPos = Vector(4, f32){ 13.0, 2.0, 3.0, 0.0 };
const lookTarget = Vector(4, f32){ 0.0, 0.0, 0.0, 0.0 };
var camera = Camera.init(cameraPos, lookTarget, Vector(4, f32){ 0.0, 1.0, 0.0, 0.0 }, PI / 8.0, aspectRatio, 0.1, 10.0);
const materialCount = 16;
var diffuseMats: [materialCount]LambertianMat = undefined;
var metalMats: [materialCount]MetalMat = undefined;
var dielectricMats: [materialCount]DielectricMat = undefined;
{
        var prng = DefaultRandom.init(0);
        var rng = prng.random();
var materialIndex: u32 = 0;
while (materialIndex < materialCount) : (materialIndex += 1) {
diffuseMats[materialIndex] = LambertianMat.init(Vector(3, f32){ 0.1 + rng.float(f32) * 0.9, 0.1 + rng.float(f32) * 0.9, 0.1 + rng.float(f32) * 0.9 });
metalMats[materialIndex] = MetalMat.init(Vector(3, f32){ 0.1 + rng.float(f32) * 0.9, 0.1 + rng.float(f32) * 0.9, 0.1 + rng.float(f32) * 0.9 }, rng.float(f32) * 0.4);
dielectricMats[materialIndex] = DielectricMat.init(Vector(3, f32){ 0.6 + rng.float(f32) * 0.4, 0.6 + rng.float(f32) * 0.4, 0.6 + rng.float(f32) * 0.4 }, 1.5);
}
}
const sphereCount = 256 + 4;
var spheres: []Sphere = try allocator.alloc(Sphere, sphereCount);
defer allocator.free(spheres);
const dielectricMat = DielectricMat.init(Vector(3, f32){ 1.0, 1.0, 1.0 }, 1.5);
spheres[0] = Sphere.init(&dielectricMat.material, Vector(4, f32){ 0.0, 1.0, 0.0, 0.0 }, 1.0);
const bronzeMetalMat = MetalMat.init(Vector(3, f32){ 0.7, 0.5, 0.1 }, 0.0);
spheres[2] = Sphere.init(&bronzeMetalMat.material, Vector(4, f32){ 4.0, 1.0, 0.0, 0.0 }, 1.0);
const greyDiffuseMat = LambertianMat.init(Vector(3, f32){ 0.5, 0.5, 0.5 });
spheres[1] = Sphere.init(&greyDiffuseMat.material, Vector(4, f32){ -4.0, 1.0, 0.0, 0.0 }, 1.0);
const greenDiffuseMat = LambertianMat.init(Vector(3, f32){ 0.35, 0.6, 0.2 });
spheres[3] = Sphere.init(&greenDiffuseMat.material, Vector(4, f32){ 0.0, -2000.0, 0.0, 0.0 }, 2000);
{
        var prng = DefaultRandom.init(0);
        var rng = prng.random();
var sphereIndex: u32 = 4;
var x: f32 = 16 + 1;
while (x > 1) {
x -= 1;
var z: f32 = 16 + 1;
while (z > 1) {
z -= 1;
var radius = 0.05 + rng.float(f32) * 0.2;
                var randomPos = Vector(4, f32){ (x + (rng.float(f32) - 0.5) - 12.0) * 2, radius, z + (rng.float(f32) - 0.5) - 8.0, 0.0 };
const materialIndex = @floatToInt(u32, @round(rng.float(f32) * (materialCount - 1)));
var material = switch (rng.float(f32)) {
0.0...0.5 => &diffuseMats[materialIndex].material,
0.5...0.8 => &metalMats[materialIndex].material,
else => &dielectricMats[materialIndex].material,
};
spheres[sphereIndex] = Sphere.init(material, randomPos, radius);
sphereIndex += 1;
}
}
}
const chunkCountAlongAxis = 16;
const chunkCount = chunkCountAlongAxis * chunkCountAlongAxis;
var chunks: [chunkCount]Chunk = undefined;
const chunkSize = Vector(2, u32){ size[0] / chunkCountAlongAxis, size[1] / chunkCountAlongAxis };
var chunkIndex: u32 = 0;
while (chunkIndex < chunkCount) : (chunkIndex += 1) {
const chunkCol = @mod(chunkIndex, chunkCountAlongAxis);
const chunkRow = @divTrunc(chunkIndex, chunkCountAlongAxis);
const chunkStartIndices = Vector(2, u32){ chunkCol * chunkSize[0], chunkRow * chunkSize[1] };
chunks[chunkIndex] = Chunk.init(chunkStartIndices, chunkSize);
}
var accumulatedPixels: []Vector(3, f32) = try allocator.alloc(Vector(3, f32), pixelCount);
defer allocator.free(accumulatedPixels);
std.mem.set(Vector(3, f32), accumulatedPixels, Vector(3, f32){ 0.0, 0.0, 0.0 });
const threadCount = 12;
var ctxs: [threadCount]RenderThreadCtx = undefined;
var tasks: [threadCount]Thread = undefined;
    var threadId: u32 = 0;
    // Keep each thread's PRNG alive for as long as the thread runs; the Random
    // interface holds a pointer to it.
    var prngs: [threadCount]DefaultRandom = undefined;
    while (threadId < threadCount) : (threadId += 1) {
        prngs[threadId] = DefaultRandom.init(threadId);
        var ctx = RenderThreadCtx{
            .id = threadId,
            .chunks = &chunks,
            .rng = prngs[threadId].random(),
.pixels = accumulatedPixels,
.camera = &camera,
.spheres = spheres,
.size = size,
.spp = spp,
.gamma = gamma,
.maxBounces = maxBounces,
};
ctxs[threadId] = ctx;
tasks[threadId] = try Thread.spawn(.{}, renderThreadFn, .{&ctxs[threadId]});
}
threadId = 0;
while (threadId < threadCount) : (threadId += 1) {
tasks[threadId].join();
}
try printErr("Writing...\n", .{});
try outputPPMHeader(size);
var img: []Pixel = try allocator.alloc(Pixel, pixelCount);
defer allocator.free(img);
var y: usize = size[1];
while (y > 0) {
y -= 1;
var x: usize = 0;
while (x < size[0]) : (x += 1) {
var color = accumulatedPixels[y * width + x];
img[y * width + x] = Pixel{
.r = @truncate(u8, @floatToInt(u32, pow(f32, color[0] / spp, 1.0 / gamma) * 255)),
.g = @truncate(u8, @floatToInt(u32, pow(f32, color[1] / spp, 1.0 / gamma) * 255)),
.b = @truncate(u8, @floatToInt(u32, pow(f32, color[2] / spp, 1.0 / gamma) * 255)),
};
}
}
try outputPixels(size, img);
} | src/main.zig |
const std = @import("std");
const upaya = @import("upaya");
const math = upaya.math;
const colors = upaya.colors;
const fs = std.fs;
usingnamespace upaya.imgui;
const stb = @import("stb");
var atlas: ?upaya.TexturePacker.Atlas = null;
var texture: ?upaya.Texture = null;
pub fn main() !void {
upaya.run(.{
.init = init,
.update = update,
.shutdown = shutdown,
.docking = false,
.width = 1024,
.height = 768,
.window_title = "Texture Packer",
.onFileDropped = onFileDropped,
});
}
fn init() void {}
fn shutdown() void {
if (atlas) |a| a.deinit();
}
fn update() void {
ogSetNextWindowPos(.{}, ImGuiCond_Always, .{});
ogSetNextWindowSize(.{
.x = @intToFloat(f32, upaya.sokol.sapp_width()),
.y = @intToFloat(f32, upaya.sokol.sapp_height()),
}, ImGuiCond_Always);
if (igBegin("Main Window", null, ImGuiWindowFlags_NoTitleBar)) {
if (atlas) |a| {
igText("Atlas Size:");
igSameLine(0, 5);
igSetNextItemWidth(100);
var tmp_size = [_]c_int{ @intCast(c_int, a.w), @intCast(c_int, a.h) };
_ = igInputInt2("", &tmp_size, ImGuiInputTextFlags_None);
igSameLine(0, 5);
if (ogButton("Save to Desktop")) {
const path_or_null = upaya.known_folders.getPath(upaya.mem.tmp_allocator, .desktop) catch unreachable;
if (path_or_null) |path| atlas.?.save(path, "test");
}
defer igEndChild();
if (ogBeginChildEx("#child", 666, ogGetContentRegionAvail(), true, ImGuiWindowFlags_NoTitleBar | ImGuiWindowFlags_HorizontalScrollbar)) {
var pos = ogGetCursorScreenPos();
const size = ImVec2{ .x = @intToFloat(f32, a.w), .y = @intToFloat(f32, a.h) };
ogAddRectFilled(igGetWindowDrawList(), pos, size, colors.rgbToU32(0, 0, 0));
ogAddRect(igGetWindowDrawList(), pos, size, colors.rgbToU32(155, 0, 155), 1);
_ = ogInvisibleButton("##rects", size, ImGuiButtonFlags_None);
for (a.rects) |rect| {
const tl = .{ .x = pos.x + @intToFloat(f32, rect.x), .y = pos.y + @intToFloat(f32, rect.y) };
ogAddRect(igGetWindowDrawList(), tl, .{ .x = @intToFloat(f32, rect.w), .y = @intToFloat(f32, rect.h) }, colors.rgbToU32(0, 255, 0), 1);
drawChunk(tl, rect.asRect());
}
}
} else {
var pos = ogGetCursorScreenPos();
const size = ogGetContentRegionAvail();
ogAddRectFilled(igGetWindowDrawList(), pos, size, colors.rgbToU32(80, 80, 80));
var text_size: ImVec2 = undefined;
igCalcTextSize(&text_size, "Drag/drop a folder", null, false, 1024);
ogSetCursorPos(.{ .x = (size.x / 2) - text_size.x, .y = size.y / 2 });
igGetCurrentContext().FontSize *= 2;
igText("Drag/drop a folder");
igGetCurrentContext().FontSize /= 2;
}
}
igEnd();
}
fn drawChunk(tl: ImVec2, rect: math.Rect) void {
var br = tl;
br.x += rect.w;
br.y += rect.h;
const inv_w = 1.0 / @intToFloat(f32, atlas.?.w);
const inv_h = 1.0 / @intToFloat(f32, atlas.?.h);
const uv0 = ImVec2{ .x = rect.x * inv_w, .y = rect.y * inv_h };
const uv1 = ImVec2{ .x = (rect.x + rect.w) * inv_w, .y = (rect.y + rect.h) * inv_h };
ogImDrawList_AddImage(igGetWindowDrawList(), texture.?.imTextureID(), tl, br, uv0, uv1, 0xFFFFFFFF);
}
fn onFileDropped(file: []const u8) void {
    if (fs.cwd().openDir(file, .{ .iterate = true })) |dir| {
        // The directory is only opened to verify the drop target; close it again.
        var dir_copy = dir;
        dir_copy.close();
        atlas = upaya.TexturePacker.pack(file) catch unreachable;
if (texture) |tex| tex.deinit();
texture = atlas.?.image.asTexture(.nearest);
} else |err| {
std.debug.print("Dropped a non-directory: {}, err: {}\n", .{ file, err });
}
} | examples/texture_packer.zig |
usingnamespace @import("std").builtin;
/// Deprecated
pub const arch = Target.current.cpu.arch;
/// Deprecated
pub const endian = Target.current.cpu.arch.endian();
/// Zig version. When writing code that supports multiple versions of Zig, prefer
/// feature detection (i.e. with `@hasDecl` or `@hasField`) over version checks.
pub const zig_version = try @import("std").SemanticVersion.parse("0.8.0-dev.2065+bc06e1982");
pub const zig_is_stage2 = false;
pub const output_mode = OutputMode.Exe;
pub const link_mode = LinkMode.Static;
pub const is_test = true;
pub const single_threaded = false;
pub const abi = Abi.msvc;
pub const cpu: Cpu = Cpu{
.arch = .x86_64,
.model = &Target.x86.cpu.skylake,
.features = Target.x86.featureSet(&[_]Target.x86.Feature{
.@"64bit",
.adx,
.aes,
.avx,
.avx2,
.bmi,
.bmi2,
.clflushopt,
.cmov,
.cx16,
.cx8,
.ermsb,
.f16c,
.false_deps_popcnt,
.fast_15bytenop,
.fast_gather,
.fast_scalar_fsqrt,
.fast_shld_rotate,
.fast_variable_shuffle,
.fast_vector_fsqrt,
.fma,
.fsgsbase,
.fxsr,
.idivq_to_divl,
.invpcid,
.lzcnt,
.macrofusion,
.mmx,
.movbe,
.nopl,
.pclmul,
.popcnt,
.prfchw,
.rdrnd,
.rdseed,
.rtm,
.sahf,
.slow_3ops_lea,
.sse,
.sse2,
.sse3,
.sse4_1,
.sse4_2,
.ssse3,
.vzeroupper,
.x87,
.xsave,
.xsavec,
.xsaveopt,
.xsaves,
}),
};
pub const os = Os{
.tag = .windows,
.version_range = .{ .windows = .{
.min = .win10_fe,
.max = .win10_fe,
}},
};
pub const object_format = ObjectFormat.coff;
pub const mode = Mode.Debug;
pub const link_libc = false;
pub const link_libcpp = false;
pub const have_error_return_tracing = true;
pub const valgrind_support = false;
pub const position_independent_code = true;
pub const position_independent_executable = false;
pub const strip_debug_info = false;
pub const code_model = CodeModel.default;
pub var test_functions: []TestFn = undefined; // overwritten later
pub const test_io_mode = .blocking; | src/math/zig-cache/o/8d7d9ae0f723e92a28b38e71154cbc0f/builtin.zig |
const c = @import("../../c_global.zig").c_imp;
const std = @import("std");
// dross-zig
const OpenGlError = @import("renderer_opengl.zig").OpenGlError;
const FileLoader = @import("../../utils/file_loader.zig");
// -----------------------------------------
// -----------------------------------------
// - ShaderTypeGl -
// -----------------------------------------
//TODO(devon): Move to Shader.zig eventually
/// Describes what type of shader
pub const ShaderTypeGl = enum(c_uint) {
Vertex = c.GL_VERTEX_SHADER,
Fragment = c.GL_FRAGMENT_SHADER,
Geometry = c.GL_GEOMETRY_SHADER,
};
// -----------------------------------------
// - ShaderGl -
// -----------------------------------------
/// Container that processes and compiles a sources GLSL file
pub const ShaderGl = struct {
/// OpenGL generated ID
handle: c_uint,
shader_type: ShaderTypeGl,
const Self = @This();
/// Allocates and builds the shader of the requested shader type
pub fn new(allocator: *std.mem.Allocator, shader_type: ShaderTypeGl) !*Self {
var self = try allocator.create(ShaderGl);
self.handle = c.glCreateShader(@enumToInt(shader_type));
self.shader_type = shader_type;
return self;
}
/// Cleans up and de-allocates the Shader instance
pub fn free(allocator: *std.mem.Allocator, self: *Self) void {
c.glDeleteShader(self.handle);
allocator.destroy(self);
}
/// Returns the OpenGL-generated shader id
pub fn id(self: *Self) c_uint {
return self.handle;
}
/// Sources a given GLSL shader file
pub fn source(self: *Self, path: [:0]const u8) !void {
const source_slice = FileLoader.loadFile(path, 4096) catch |err| {
std.debug.print("[Shader]: Failed to load shader ({s})! {}\n", .{ path, err });
return err;
};
        const source_size = @intCast(c_int, source_slice.?.len);
        c.glShaderSource(self.handle, 1, &source_slice.?.ptr, &source_size);
}
/// Compiles the previously sources GLSL shader file, and checks for any compilation errors.
pub fn compile(self: *Self) !void {
var no_errors: c_int = undefined;
var compilation_log: [512]u8 = undefined;
c.glCompileShader(self.handle);
c.glGetShaderiv(self.handle, c.GL_COMPILE_STATUS, &no_errors);
// If the compilation failed, log the message
if (no_errors == 0) {
c.glGetShaderInfoLog(self.handle, 512, null, &compilation_log);
            std.log.err("[Renderer][OpenGL]: Failed to compile {s} shader: \n{s}", .{ @tagName(self.shader_type), compilation_log });
return OpenGlError.ShaderCompilationFailure;
}
}
};
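// Typical usage (a sketch; assumes a current OpenGL context and a GLSL source
// file at the given path):
//     var vertex = try ShaderGl.new(allocator, .Vertex);
//     defer ShaderGl.free(allocator, vertex);
//     try vertex.source("src/renderer/shaders/default_shader.vs");
//     try vertex.compile();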
// ------------------------------------------
// - Tests -
// ------------------------------------------
test "Read Shader Test" {
const file = try std.fs.cwd().openFile(
"src/renderer/shaders/default_shader.vs",
.{},
);
defer file.close();
var buffer: [4096]u8 = undefined;
try file.seekTo(0);
const bytes_read = try file.readAll(&buffer);
const slice = buffer[0..bytes_read];
std.debug.print("{s}\n", .{slice});
} | src/renderer/backend/shader_opengl.zig |
const std = @import("std");
const builtin = std.builtin;
const build_root = "../build/";
const is_windows = std.Target.current.os.tag == .windows;
const is_macos = std.Target.current.os.tag == .macos;
pub fn build(b: *std.build.Builder) anyerror!void {
const mode = b.standardReleaseOptions();
// Previously was exe.enableSystemLinkerHack(): See https://github.com/jeffkdev/sokol-zig-examples/issues/2
if (is_macos) try b.env_map.put("ZIG_SYSTEM_LINKER_HACK", "1");
// Probably can take command line arg to build different examples
// For now rename the mainFile const below (ex: "example_triangle.zig")
const mainFile = "main.zig";
var exe = b.addExecutable("program", "../src/" ++ mainFile);
exe.addIncludeDir("../src/");
exe.setBuildMode(mode);
const cFlags = if (is_macos) [_][]const u8{ "-std=c99", "-ObjC", "-fobjc-arc" } else [_][]const u8{"-std=c99", "-mno-avx"};
exe.addCSourceFile("../src/compile_sokol.c", &cFlags);
// Add cglm
exe.addIncludeDir("../src/cglm/include/");
exe.addIncludeDir("../src/cglm/include/cglm");
exe.addCSourceFile("../src/cglm/src/euler.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/affine.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/io.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/quat.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/cam.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/vec2.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/vec3.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/vec4.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/mat2.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/mat3.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/mat4.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/plane.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/frustum.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/box.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/project.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/sphere.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/ease.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/curve.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/bezier.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/ray.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/affine2d.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/clipspace/persp_lh_zo.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/clipspace/persp_rh_zo.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/clipspace/persp_lh_no.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/clipspace/persp_rh_no.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/clipspace/ortho_lh_zo.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/clipspace/ortho_rh_zo.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/clipspace/ortho_lh_no.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/clipspace/ortho_rh_no.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/clipspace/view_lh_zo.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/clipspace/view_rh_zo.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/clipspace/view_lh_no.c", &cFlags);
exe.addCSourceFile("../src/cglm/src/clipspace/view_rh_no.c", &cFlags);
// ImGui & cimgui
const cpp_args = [_][]const u8{ "-Wno-deprecated-declarations", "-Wno-return-type-c-linkage", "-fno-exceptions", "-fno-threadsafe-statics" };
exe.addCSourceFile("../src/cimgui/imgui/imgui.cpp", &cpp_args);
exe.addCSourceFile("../src/cimgui/imgui/imgui_demo.cpp", &cpp_args);
exe.addCSourceFile("../src/cimgui/imgui/imgui_draw.cpp", &cpp_args);
exe.addCSourceFile("../src/cimgui/imgui/imgui_widgets.cpp", &cpp_args);
exe.addCSourceFile("../src/cimgui/imgui/imgui_tables.cpp", &cpp_args);
exe.addCSourceFile("../src/cimgui/cimgui.cpp", &cpp_args);
// Shaders
exe.addCSourceFile("../src/shaders/cube_compile.c", &[_][]const u8{"-std=c99"});
exe.addCSourceFile("../src/shaders/triangle_compile.c", &[_][]const u8{"-std=c99"});
exe.addCSourceFile("../src/shaders/instancing_compile.c", &[_][]const u8{"-std=c99"});
exe.linkLibC();
exe.linkSystemLibrary("c++");
if (is_windows) {
//See https://github.com/ziglang/zig/issues/8531 only matters in release mode
exe.want_lto = false;
exe.linkSystemLibrary("user32");
exe.linkSystemLibrary("gdi32");
exe.linkSystemLibrary("ole32"); // For Sokol audio
} else if (is_macos) {
const frameworks_dir = try macos_frameworks_dir(b);
exe.addFrameworkDir(frameworks_dir);
exe.linkFramework("Foundation");
exe.linkFramework("Cocoa");
exe.linkFramework("Quartz");
exe.linkFramework("QuartzCore");
exe.linkFramework("Metal");
exe.linkFramework("MetalKit");
exe.linkFramework("OpenGL");
exe.linkFramework("Audiotoolbox");
exe.linkFramework("CoreAudio");
} else {
        // Not tested. Remove this panic and uncomment the links below to try it;
        // statements after @panic would be rejected as unreachable code.
        @panic("OS not supported. Try removing panic in build.zig if you want to test this");
        // exe.linkSystemLibrary("GL");
        // exe.linkSystemLibrary("GLEW");
}
const run_cmd = exe.run();
const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step);
b.default_step.dependOn(&exe.step);
b.installArtifact(exe);
}
// helper function to get SDK path on Mac sourced from: https://github.com/floooh/sokol-zig
fn macos_frameworks_dir(b: *std.build.Builder) ![]u8 {
var str = try b.exec(&[_][]const u8{ "xcrun", "--show-sdk-path" });
const strip_newline = std.mem.lastIndexOf(u8, str, "\n");
if (strip_newline) |index| {
str = str[0..index];
}
const frameworks_dir = try std.mem.concat(b.allocator, u8, &[_][]const u8{ str, "/System/Library/Frameworks" });
return frameworks_dir;
} | src/build.zig |
const image = @import("./image.zig");
const t = @import("../testing/index.zig");
const std = @import("std");
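// Brute-force containment check used to cross-validate Rectangle.in: f is in g
// only if Rectangle.in agrees and every point of f individually tests in g.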
fn in(f: image.Rectangle, g: image.Rectangle) bool {
if (!f.in(g)) {
return false;
}
var y = f.min.y;
while (y < f.max.y) {
var x = f.min.x;
while (x < f.max.x) {
var p = image.Point.init(x, y);
if (!p.in(g)) {
return false;
}
x += 1;
}
y += 1;
}
return true;
}
const rectangles = []image.Rectangle{
image.Rectangle.rect(0, 0, 10, 10),
image.Rectangle.rect(10, 0, 20, 10),
image.Rectangle.rect(1, 2, 3, 4),
image.Rectangle.rect(4, 6, 10, 10),
image.Rectangle.rect(2, 3, 12, 5),
image.Rectangle.rect(-1, -2, 0, 0),
image.Rectangle.rect(-1, -2, 4, 6),
image.Rectangle.rect(-10, -20, 30, 40),
image.Rectangle.rect(8, 8, 8, 8),
image.Rectangle.rect(88, 88, 88, 88),
image.Rectangle.rect(6, 5, 4, 3),
};
test "Rectangle" {
for (rectangles) |r| {
for (rectangles) |s| {
const got = r.eq(s);
const want = in(r, s) and in(s, r);
if (got != want) {
try t.terrorf("\n {}:{} expected {} to be in {}\n", got, want, r, s);
}
}
}
for (rectangles) |r| {
for (rectangles) |s| {
const a = r.intersect(s);
if (!in(a, r)) {
try t.terrorf("\n {} {} {}\n", r, s, a);
}
if (!in(a, s)) {
try t.terrorf("\nexpected {} to be in {}\n", a, s);
}
const is_zero = a.eq(image.Rectangle.zero());
const overlaps = r.overlaps(s);
if (is_zero == overlaps) {
try t.terrorf("\n Intersect: r={}, s={}, a={}: is_zero={} same as overlaps={}\n", r, s, a, is_zero, overlaps);
}
const larger_than_a = []image.Rectangle{
image.Rectangle.init(
a.min.x - 1,
a.min.y,
a.max.x,
a.max.y,
),
image.Rectangle.init(
a.min.x,
a.min.y - 1,
a.max.x,
a.max.y,
),
image.Rectangle.init(
a.min.x,
a.min.y,
a.max.x + 1,
a.max.y,
),
image.Rectangle.init(
a.min.x,
a.min.y,
a.max.x,
a.max.y + 1,
),
};
for (larger_than_a) |b| {
if (b.empty()) {
continue;
}
if (in(b, r) and in(b, s)) {
try t.terrorf("\n Intersect: r={}, s={}, a={}, b={} :intersection could be larger\n", r, s, a, b);
}
}
}
}
for (rectangles) |r| {
for (rectangles) |s| {
const a = r.runion(s);
if (!in(r, a)) {
try t.terrorf("\nUnion: r={}, s={}, a={} r not in a ", r, s, a);
}
if (!in(s, a)) {
try t.terrorf("\nUnion: r={}, s={}, a={} s not in a ", r, s, a);
}
if (a.empty()) {
continue;
}
const smaller_than_a = []image.Rectangle{
image.Rectangle.init(
a.min.x + 1,
a.min.y,
a.max.x,
a.max.y,
),
image.Rectangle.init(
a.min.x,
a.min.y + 1,
a.max.x,
a.max.y,
),
image.Rectangle.init(
a.min.x,
a.min.y,
a.max.x - 1,
a.max.y,
),
image.Rectangle.init(
a.min.x,
a.min.y,
a.max.x,
a.max.y - 1,
),
};
for (smaller_than_a) |b| {
if (in(r, b) and in(s, b)) {
try t.terrorf("\nUnion: r={}, s={}, a={}, b={}: union could be smaller ", r, s, a, b);
}
}
}
}
}
const TestImage = struct{
name: []const u8,
image: image.Image,
mem: []u8,
};
fn newRGBA(a: *std.mem.Allocator, r: image.Rectangle) !TestImage {
const w = @intCast(usize, r.dx());
const h = @intCast(usize, r.dy());
const size = 4 * w * h;
var u = try a.alloc(u8, size);
var m = &image.RGBA.init(u, 4 * r.dx(), r);
return TestImage{
.name = "RGBA",
.image = m.image(),
.mem = u,
};
}
test "Image" {
var allocator = std.debug.global_allocator;
const rgb = try newRGBA(allocator, image.Rectangle.rect(0, 0, 10, 10));
defer allocator.free(rgb.mem);
const test_images = []TestImage{rgb};
for (test_images) |tc| {
const r = image.Rectangle.rect(0, 0, 10, 10);
if (!r.eq(tc.image.bounds)) {
try t.terrorf("\n want bounds={} got {}\n", r, tc.image.bounds);
}
}
} | src/image/image_test.zig |
const std = @import("std");
const expectEqual = std.testing.expectEqual;
const approxEq = std.math.approxEq;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const Buffer = std.Buffer;
const fixedBufferStream = std.io.fixedBufferStream;
pub const Pos = struct {
x: usize,
y: usize,
const Self = @This();
pub fn eq(a: Self, b: Self) bool {
return a.x == b.x and a.y == b.y;
}
};
pub fn pos(x: usize, y: usize) Pos {
return Pos{ .x = x, .y = y };
}
fn absDiff(x: usize, y: usize) usize {
return if (x > y) x - y else y - x;
}
fn diff(x: usize, y: usize) isize {
return @intCast(isize, x) - @intCast(isize, y);
}
fn sign(x: isize) isize {
if (x > 0) {
return 1;
} else if (x < 0) {
return -1;
} else {
return 0;
}
}
fn divToFloat(x: isize, y: isize) f64 {
return @intToFloat(f64, x) / @intToFloat(f64, y);
}
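/// Returns true if `obstr` lies on the segment between `origin` and `dest`:
/// it must be no farther than `dest`, in the same direction from `origin`,
/// and collinear within a small epsilon.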
pub fn obstructs(origin: Pos, dest: Pos, obstr: Pos) bool {
// the possible obstruction must be nearer
if (absDiff(origin.x, dest.x) < absDiff(origin.x, obstr.x))
return false;
if (absDiff(origin.y, dest.y) < absDiff(origin.y, obstr.y))
return false;
const origin_dest_x = diff(dest.x, origin.x);
const origin_obstr_x = diff(obstr.x, origin.x);
if (sign(origin_dest_x) != sign(origin_obstr_x))
return false;
const origin_dest_y = diff(dest.y, origin.y);
const origin_obstr_y = diff(obstr.y, origin.y);
if (sign(origin_dest_y) != sign(origin_obstr_y))
return false;
// the multiple of x and y must be the same
if (origin_dest_x == 0) {
return origin_obstr_x == 0;
} else if (origin_dest_y == 0) {
return origin_obstr_y == 0;
} else {
const epsilon = 0.000001;
return approxEq(f64, divToFloat(origin_obstr_x, origin_dest_x), divToFloat(origin_obstr_y, origin_dest_y), epsilon);
}
}
test "obstruction" {
expectEqual(true, obstructs(pos(0, 0), pos(4, 4), pos(2, 2)));
expectEqual(true, obstructs(pos(0, 0), pos(6, 6), pos(2, 2)));
expectEqual(true, obstructs(pos(0, 0), pos(6, 6), pos(4, 4)));
expectEqual(false, obstructs(pos(0, 0), pos(2, 2), pos(4, 4)));
expectEqual(false, obstructs(pos(2, 2), pos(0, 0), pos(4, 4)));
expectEqual(false, obstructs(pos(2, 2), pos(4, 0), pos(4, 4)));
expectEqual(true, obstructs(pos(0, 0), pos(2, 6), pos(1, 3)));
expectEqual(false, obstructs(pos(0, 0), pos(2, 7), pos(1, 3)));
expectEqual(true, obstructs(pos(0, 0), pos(0, 5), pos(0, 2)));
expectEqual(true, obstructs(pos(0, 0), pos(5, 0), pos(2, 0)));
}
pub const AsteroidMap = struct {
asteroids: []Pos,
const Self = @This();
pub fn fromStream(stream: anytype, allocator: *Allocator) !Self {
var asteroids = ArrayList(Pos).init(allocator);
var y: usize = 0;
while (stream.readUntilDelimiterAlloc(allocator, '\n', 1024)) |line| {
for (line) |c, i| {
switch (c) {
'#' => try asteroids.append(Pos{
.x = i,
.y = y,
}),
'.' => {},
else => {
std.debug.warn("invalid char: {}\n", .{c});
return error.InvalidMapCharacter;
},
}
}
y += 1;
} else |e| switch (e) {
error.EndOfStream => {},
else => return e,
}
return Self{
.asteroids = asteroids.items,
};
}
pub fn detectableAsteroids(self: Self, position: Pos) usize {
var result: usize = 0;
for (self.asteroids) |x| {
if (x.eq(position))
continue;
const obstructed = for (self.asteroids) |y| {
if (y.eq(position) or y.eq(x))
continue;
if (obstructs(position, x, y))
break true;
} else false;
if (!obstructed)
result += 1;
}
return result;
}
pub fn maxDetectableAsteroids(self: Self) ?usize {
var result: ?usize = null;
for (self.asteroids) |x| {
const val = self.detectableAsteroids(x);
if (result) |max| {
if (val > max)
result = val;
} else {
result = val;
}
}
return result;
}
};
test "read asteroid map" {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = &arena.allocator;
defer arena.deinit();
var input_stream = fixedBufferStream(
\\..#
\\#.#
\\...
\\
).reader();
const map = try AsteroidMap.fromStream(input_stream, allocator);
expectEqual(@intCast(usize, 3), map.asteroids.len);
expectEqual(map.asteroids[0], Pos{ .x = 2, .y = 0 });
expectEqual(map.asteroids[1], Pos{ .x = 0, .y = 1 });
expectEqual(map.asteroids[2], Pos{ .x = 2, .y = 1 });
}
test "count visible asteroids" {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = &arena.allocator;
defer arena.deinit();
var input_stream = fixedBufferStream(
\\.#..#
\\.....
\\#####
\\....#
\\...##
\\
).reader();
const map = try AsteroidMap.fromStream(input_stream, allocator);
expectEqual(@intCast(usize, 10), map.asteroids.len);
expectEqual(@intCast(usize, 7), map.detectableAsteroids(pos(1, 0)));
expectEqual(@intCast(usize, 7), map.detectableAsteroids(pos(4, 0)));
expectEqual(@intCast(usize, 6), map.detectableAsteroids(pos(0, 2)));
expectEqual(@intCast(usize, 7), map.detectableAsteroids(pos(1, 2)));
expectEqual(@intCast(usize, 7), map.detectableAsteroids(pos(2, 2)));
expectEqual(@intCast(usize, 7), map.detectableAsteroids(pos(3, 2)));
expectEqual(@intCast(usize, 5), map.detectableAsteroids(pos(4, 2)));
expectEqual(@intCast(usize, 7), map.detectableAsteroids(pos(4, 3)));
expectEqual(@intCast(usize, 7), map.detectableAsteroids(pos(4, 4)));
expectEqual(@intCast(usize, 8), map.detectableAsteroids(pos(3, 4)));
}
test "max visible asteroids 1" {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = &arena.allocator;
defer arena.deinit();
var input_stream = fixedBufferStream(
\\.#..#
\\.....
\\#####
\\....#
\\...##
\\
).reader();
const map = try AsteroidMap.fromStream(input_stream, allocator);
expectEqual(@intCast(usize, 10), map.asteroids.len);
expectEqual(@intCast(usize, 8), map.maxDetectableAsteroids().?);
}
test "max visible asteroids 2" {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = &arena.allocator;
defer arena.deinit();
var input_stream = fixedBufferStream(
\\......#.#.
\\#..#.#....
\\..#######.
\\.#.#.###..
\\.#..#.....
\\..#....#.#
\\#..#....#.
\\.##.#..###
\\##...#..#.
\\.#....####
\\
).reader();
const map = try AsteroidMap.fromStream(input_stream, allocator);
expectEqual(@intCast(usize, 33), map.maxDetectableAsteroids().?);
}
test "max visible asteroids 3" {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = &arena.allocator;
defer arena.deinit();
var input_stream = fixedBufferStream(
\\#.#...#.#.
\\.###....#.
\\.#....#...
\\##.#.#.#.#
\\....#.#.#.
\\.##..###.#
\\..#...##..
\\..##....##
\\......#...
\\.####.###.
\\
).reader();
const map = try AsteroidMap.fromStream(input_stream, allocator);
expectEqual(@intCast(usize, 35), map.maxDetectableAsteroids().?);
}
test "max visible asteroids 4" {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = &arena.allocator;
defer arena.deinit();
var input_stream = fixedBufferStream(
\\.#..#..###
\\####.###.#
\\....###.#.
\\..###.##.#
\\##.##.#.#.
\\....###..#
\\..#.#..#.#
\\#..#.#.###
\\.##...##.#
\\.....#.#..
\\
).reader();
const map = try AsteroidMap.fromStream(input_stream, allocator);
expectEqual(@intCast(usize, 41), map.maxDetectableAsteroids().?);
}
test "max visible asteroids 5" {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = &arena.allocator;
defer arena.deinit();
var input_stream = fixedBufferStream(
\\.#..##.###...#######
\\##.############..##.
\\.#.######.########.#
\\.###.#######.####.#.
\\#####.##.#.##.###.##
\\..#####..#.#########
\\####################
\\#.####....###.#.#.##
\\##.#################
\\#####.##.###..####..
\\..######..##.#######
\\####.##.####...##..#
\\.#####..#.######.###
\\##...#.##########...
\\#.##########.#######
\\.####.#.###.###.#.##
\\....##.##.###..#####
\\.#.#.###########.###
\\#.#.#.#####.####.###
\\###.##.####.##.#..##
\\
).reader();
const map = try AsteroidMap.fromStream(input_stream, allocator);
expectEqual(@intCast(usize, 210), map.maxDetectableAsteroids().?);
}
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = &arena.allocator;
defer arena.deinit();
const input_file = try std.fs.cwd().openFile("input10.txt", .{});
var input_stream = input_file.reader();
const map = try AsteroidMap.fromStream(input_stream, allocator);
const max = map.maxDetectableAsteroids().?;
std.debug.warn("max detectable asteroids: {}\n", .{max});
} | zig/10.zig |
const sf = @import("../sfml.zig");
const Music = @This();
// Constructor/destructor
/// Loads music from a file
pub fn createFromFile(path: [:0]const u8) !Music {
var music = sf.c.sfMusic_createFromFile(path);
if (music == null)
return sf.Error.resourceLoadingError;
return Music{ ._ptr = music.? };
}
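// Typical usage (a sketch; assumes "music.ogg" exists and audio is available):
//     var music = try Music.createFromFile("music.ogg");
//     defer music.destroy();
//     music.setLoop(true);
//     music.play();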
pub const initFromMemory = @compileError("Function is not implemented yet.");
pub const initFromStream = @compileError("Function is not implemented yet.");
/// Destroys this music object
pub fn destroy(self: *Music) void {
sf.c.sfMusic_destroy(self._ptr);
}
// Music control functions
/// Plays the music
pub fn play(self: *Music) void {
sf.c.sfMusic_play(self._ptr);
}
/// Pauses the music
pub fn pause(self: *Music) void {
sf.c.sfMusic_pause(self._ptr);
}
/// Stops the music and resets the player position
pub fn stop(self: *Music) void {
sf.c.sfMusic_stop(self._ptr);
}
// Getters / Setters
/// Gets the total duration of the music
pub fn getDuration(self: Music) sf.Time {
return sf.Time._fromCSFML(sf.c.sfMusic_getDuration(self._ptr));
}
/// Gets the current stream position of the music
pub fn getPlayingOffset(self: Music) sf.Time {
return sf.Time._fromCSFML(sf.c.sfMusic_getPlayingOffset(self._ptr));
}
/// Sets the current stream position of the music
pub fn setPlayingOffset(self: *Music, offset: sf.Time) void {
sf.c.sfMusic_setPlayingOffset(self._ptr, offset._toCSFML());
}
/// Gets the loop points of the music
pub fn getLoopPoints(self: Music) sf.TimeSpan {
return sf.TimeSpan._fromCSFML(sf.c.sfMusic_getLoopPoints(self._ptr));
}
/// Sets the loop points of the music
pub fn setLoopPoints(self: *Music, span: sf.TimeSpan) void {
sf.c.sfMusic_setLoopPoints(self._ptr, span._toCSFML());
}
/// Tells whether or not this stream is in loop mode
pub fn getLoop(self: Music) bool {
return sf.c.sfMusic_getLoop(self._ptr) != 0;
}
/// Enable or disable auto loop
pub fn setLoop(self: *Music, loop: bool) void {
sf.c.sfMusic_setLoop(self._ptr, @boolToInt(loop));
}
/// Gets the pitch of the music
pub fn getPitch(self: Music) f32 {
return sf.c.sfMusic_getPitch(self._ptr);
}
/// Sets the pitch of the music
pub fn setPitch(self: *Music, pitch: f32) void {
sf.c.sfMusic_setPitch(self._ptr, pitch);
}
/// Gets the volume of the music
pub fn getVolume(self: Music) f32 {
return sf.c.sfMusic_getVolume(self._ptr);
}
/// Sets the volume of the music
pub fn setVolume(self: *Music, volume: f32) void {
sf.c.sfMusic_setVolume(self._ptr, volume);
}
/// Gets the sample rate of this music
pub fn getSampleRate(self: Music) usize {
return @intCast(usize, sf.c.sfMusic_getSampleRate(self._ptr));
}
/// Gets the channel count of the music
pub fn getChannelCount(self: Music) usize {
return @intCast(usize, sf.c.sfMusic_getChannelCount(self._ptr));
}
pub const getStatus = @compileError("Function is not implemented yet.");
pub const setRelativeToListener = @compileError("Function is not implemented yet.");
pub const isRelativeToListener = @compileError("Function is not implemented yet.");
pub const setMinDistance = @compileError("Function is not implemented yet.");
pub const setAttenuation = @compileError("Function is not implemented yet.");
pub const getMinDistance = @compileError("Function is not implemented yet.");
pub const getAttenuation = @compileError("Function is not implemented yet.");
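// Minimal usage sketch (the file name is hypothetical and the CSFML audio
// module must be linked, so this is a comment rather than a test):
//
// var music = try Music.createFromFile("theme.ogg");
// defer music.destroy();
// music.setLoop(true);
// music.setVolume(50);
// music.play();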
/// Pointer to the csfml music
_ptr: *sf.c.sfMusic, | src/sfml/audio/Music.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const ogg = @import("ogg.zig");
const Metadata = @import("metadata.zig").Metadata;
pub const codec_id = "vorbis";
// bit flags for the header type (used in the first byte of a page's data)
// from https://xiph.org/vorbis/doc/Vorbis_I_spec.html#x1-620004.2.1
pub const PacketType = enum(u8) {
audio = 0,
identification = 1,
comment = 3,
setup = 5,
};
/// Note: It is up to the caller to set the metadata start/end offsets; they are
/// not set within this function.
pub fn readComment(allocator: Allocator, reader: anytype, seekable_stream: anytype) !Metadata {
var metadata: Metadata = Metadata.init(allocator);
errdefer metadata.deinit();
var metadata_map = &metadata.map;
const vendor_length = try reader.readIntLittle(u32);
try reader.skipBytes(vendor_length, .{});
const user_comment_list_length = try reader.readIntLittle(u32);
var user_comment_index: u32 = 0;
while (user_comment_index < user_comment_list_length) : (user_comment_index += 1) {
const comment_length = try reader.readIntLittle(u32);
// short circuit for impossible comment lengths to avoid
// giant allocations that we know are impossible to read
const max_remaining_bytes = (try seekable_stream.getEndPos()) - (try seekable_stream.getPos());
if (comment_length > max_remaining_bytes) {
return error.EndOfStream;
}
var comment = try allocator.alloc(u8, comment_length);
defer allocator.free(comment);
try reader.readNoEof(comment);
var split_it = std.mem.split(u8, comment, "=");
var field = split_it.next() orelse return error.InvalidCommentField;
var value = split_it.rest();
// Vorbis comments are case-insensitive, so always convert them to
// upper case here in order to make that straightforward on
// the storage side of things
const field_upper = try std.ascii.allocUpperString(allocator, field);
defer allocator.free(field_upper);
try metadata_map.put(field_upper, value);
}
return metadata;
}
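// The comment block parsed above is laid out as follows (all lengths are
// little-endian u32), per the Vorbis I spec:
// [vendor_length][vendor_string][user_comment_list_length]
// then, for each comment: [comment_length]["FIELD=value"]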
/// Expects the stream to be at the start of the Ogg bitstream (i.e.
/// any ID3v2 tags must be skipped before calling this function)
pub fn read(allocator: Allocator, reader: anytype, seekable_stream: anytype) !Metadata {
const ogg_page_reader = ogg.oggPageReader(reader).reader();
// identification
const id_header_type = try ogg_page_reader.readByte();
if (id_header_type != @enumToInt(PacketType.identification)) {
return error.UnexpectedHeaderType;
}
const id_signature = try ogg_page_reader.readBytesNoEof(codec_id.len);
if (!std.mem.eql(u8, &id_signature, codec_id)) {
return error.UnexpectedCodec;
}
_ = try ogg_page_reader.skipBytes(22, .{});
const id_framing_bit = try ogg_page_reader.readByte();
if (id_framing_bit & 1 != 1) {
return error.MissingFramingBit;
}
// comment
const header_type = try ogg_page_reader.readByte();
if (header_type != @enumToInt(PacketType.comment)) {
return error.UnexpectedHeaderType;
}
const comment_signature = try ogg_page_reader.readBytesNoEof(codec_id.len);
if (!std.mem.eql(u8, &comment_signature, codec_id)) {
return error.UnexpectedCodec;
}
const start_offset = try seekable_stream.getPos();
var metadata = try readComment(allocator, ogg_page_reader, seekable_stream);
errdefer metadata.deinit();
metadata.start_offset = start_offset;
metadata.end_offset = try seekable_stream.getPos();
// verify framing bit
const comment_framing_bit = try ogg_page_reader.readByte();
if (comment_framing_bit & 1 != 1) {
return error.MissingFramingBit;
}
return metadata;
} | src/vorbis.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const List = std.ArrayList;
const Map = std.AutoHashMap;
const StrMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const Str = []const u8;
const int = i64;
const util = @import("util.zig");
const gpa = util.gpa;
const data = @embedFile("../data/day14.txt");
const Key = struct {
pair: [2]u8,
depth: u8,
};
const Result = std.meta.Vector(26, u64);
const Memo = Map(Key, Result);
const Rules = std.AutoArrayHashMap([2]u8, u8);
const CountsArr = [26]u64;
const CountsVec = std.meta.Vector(26, u64);
const PairId = u8;
const Link = struct {
left: PairId,
right: PairId,
};
const Parts = struct {
part1: u64,
part2: u64,
};
pub fn main() !void {
var template: []const u8 = undefined;
var rules = Rules.init(gpa);
defer rules.deinit();
{
var lines = tokenize(u8, data, "\r\n");
template = lines.next().?;
while (lines.next()) |line| {
if (line.len == 0) { continue; }
var parts = tokenize(u8, line, " -> ");
const key = parts.next().?;
const val = parts.next().?;
assert(parts.next() == null);
assert(key.len == 2);
assert(val.len == 1);
try rules.put(key[0..2].*, val[0]);
}
}
bench(eager, rules, template, "eager");
bench(fungible, rules, template, "fungible");
bench(lazy, rules, template, "lazy");
const result = fungible(rules, template);
print("part1={}, part2={}\n", .{result.part1, result.part2});
}
fn bench(comptime func: fn (Rules, []const u8) Parts, rules: Rules, template: []const u8, name: []const u8) void {
var i: usize = 0;
var best_time: usize = std.math.maxInt(usize);
var total_time: usize = 0;
const num_runs = 1000;
while (i < num_runs) : (i += 1) {
var timer = std.time.Timer.start() catch unreachable;
const parts = func(rules, template);
std.mem.doNotOptimizeAway(&parts);
const lap_time = timer.read();
if (best_time > lap_time) best_time = lap_time;
total_time += lap_time;
}
print("min {} avg {} {s}\n", .{best_time, total_time / num_runs, name});
}
fn eager(rules: Rules, template: []const u8) Parts {
const pairs = rules.keys();
const inserts = rules.values();
const links = gpa.alloc(Link, pairs.len) catch unreachable;
defer gpa.free(links);
var counts = gpa.alloc(CountsArr, pairs.len) catch unreachable;
defer gpa.free(counts);
var next_counts = gpa.alloc(CountsArr, pairs.len) catch unreachable;
defer gpa.free(next_counts);
for (links) |*link, i| {
link.left = @intCast(u8, rules.getIndex(.{pairs[i][0], inserts[i]}).?);
link.right = @intCast(u8, rules.getIndex(.{inserts[i], pairs[i][1]}).?);
std.mem.set(u64, &counts[i], 0);
counts[i][pairs[i][0] - 'A'] = 1;
}
var depth: usize = 0;
while (depth < 10) : (depth += 1) {
for (links) |link, i| {
const left: CountsVec = counts[link.left];
const right: CountsVec = counts[link.right];
next_counts[i] = left + right;
}
const tmp = counts;
counts = next_counts;
next_counts = tmp;
}
const part1 = calcScore(template, rules, counts);
while (depth < 40) : (depth += 1) {
for (links) |link, i| {
const left: CountsVec = counts[link.left];
const right: CountsVec = counts[link.right];
next_counts[i] = left + right;
}
const tmp = counts;
counts = next_counts;
next_counts = tmp;
}
const part2 = calcScore(template, rules, counts);
return .{ .part1 = part1, .part2 = part2 };
}
fn fungible(rules: Rules, template: []const u8) Parts {
const pairs = rules.keys();
const inserts = rules.values();
const links = gpa.alloc(Link, pairs.len) catch unreachable;
defer gpa.free(links);
var counts = gpa.alloc(u64, pairs.len) catch unreachable;
defer gpa.free(counts);
var next_counts = gpa.alloc(u64, pairs.len) catch unreachable;
defer gpa.free(next_counts);
for (links) |*link, i| {
link.left = @intCast(u8, rules.getIndex(.{pairs[i][0], inserts[i]}).?);
link.right = @intCast(u8, rules.getIndex(.{inserts[i], pairs[i][1]}).?);
}
std.mem.set(u64, counts, 0);
for (template[0..template.len-1]) |_, i| {
const pair = template[i..][0..2].*;
const idx = rules.getIndex(pair).?;
counts[idx] += 1;
}
var depth: usize = 0;
while (depth < 10) : (depth += 1) {
std.mem.set(u64, next_counts, 0);
for (links) |link, i| {
const amt = counts[i];
next_counts[link.left] += amt;
next_counts[link.right] += amt;
}
const tmp = counts;
counts = next_counts;
next_counts = tmp;
}
const part1 = calcScoreForward(pairs, counts, template[template.len-1]);
while (depth < 40) : (depth += 1) {
std.mem.set(u64, next_counts, 0);
for (links) |link, i| {
const amt = counts[i];
next_counts[link.left] += amt;
next_counts[link.right] += amt;
}
const tmp = counts;
counts = next_counts;
next_counts = tmp;
}
const part2 = calcScoreForward(pairs, counts, template[template.len-1]);
return .{ .part1 = part1, .part2 = part2 };
}
fn calcScoreForward(pairs: []const [2]u8, counts: []const u64, last_char: u8) u64 {
var scores = std.mem.zeroes([26]usize);
for (counts) |c, i| {
scores[pairs[i][0] - 'A'] += c;
}
scores[last_char - 'A'] += 1;
var max_count: u64 = 0;
var min_count: u64 = std.math.maxInt(u64);
for (scores) |c| {
if (c != 0 and c < min_count) {
min_count = c;
}
if (c > max_count) {
max_count = c;
}
}
return max_count - min_count;
}
fn lazy(rules: Rules, template: []const u8) Parts {
var map = Memo.init(gpa);
defer map.deinit();
const part1 = calcScoreAtDepth(&map, template, rules, 10);
const part2 = calcScoreAtDepth(&map, template, rules, 40);
return .{ .part1 = part1, .part2 = part2 };
}
fn calcScore(template: []const u8, rules: Rules, counts: []const CountsArr) u64 {
var total_counts = std.mem.zeroes(CountsVec);
for (template[0..template.len-1]) |_, i| {
const pair = template[i..][0..2].*;
const index = rules.getIndex(pair).?;
const pair_counts: CountsVec = counts[index];
total_counts += pair_counts;
}
var counts_arr: CountsArr = total_counts;
counts_arr[template[template.len-1] - 'A'] += 1;
var max_count: u64 = 0;
var min_count: u64 = std.math.maxInt(u64);
for (counts_arr) |c| {
if (c != 0 and c < min_count) {
min_count = c;
}
if (c > max_count) {
max_count = c;
}
}
return max_count - min_count;
}
fn calcScoreAtDepth(map: *Memo, template: []const u8, rules: Rules, depth: u8) u64 {
const counts: [26]u64 = blk: {
var counts: Result = std.mem.zeroes(Result);
var i: usize = 0;
while (i < template.len - 1) : (i += 1) {
const key = template[i..][0..2].*;
counts += count(map, rules, key, depth);
}
counts[template[template.len-1] - 'A'] += 1;
break :blk counts;
};
var max_count: u64 = 0;
var min_count: u64 = std.math.maxInt(u64);
for (counts) |c| {
if (c != 0 and c < min_count) {
min_count = c;
}
if (c > max_count) {
max_count = c;
}
}
return max_count - min_count;
}
fn count(map: *Memo, rules: Rules, pair: [2]u8, depth: u8) Result {
if (depth == 0) {
var result = std.mem.zeroes(Result);
result[pair[0] - 'A'] = 1;
return result;
}
if (map.get(.{ .pair = pair, .depth = depth })) |val| return val;
const insert = rules.get(pair).?;
const result = count(map, rules, .{pair[0], insert}, depth - 1) +
count(map, rules, .{insert, pair[1]}, depth - 1);
map.put(.{ .pair = pair, .depth = depth}, result) catch unreachable;
return result;
}
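// e.g. with a rule "NN -> C", count(map, rules, .{ 'N', 'N' }, 1) expands NN
// to NCN and returns the letter counts of "NC"; the trailing character is
// owned by the next pair, and the template's final character is added once
// in calcScoreAtDepth.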
// Useful stdlib functions
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;
const eql = std.mem.eql;
const parseEnum = std.meta.stringToEnum;
const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;
const min = std.math.min;
const min3 = std.math.min3;
const max = std.math.max;
const max3 = std.math.max3;
const print = std.debug.print;
const assert = std.debug.assert;
const sort = std.sort.sort;
const asc = std.sort.asc;
const desc = std.sort.desc; | src/day14.zig |
const std = @import("std");
const expectEqualStrings = std.testing.expectEqualStrings;
pub fn fromPascalCase(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
const rc = try allocator.alloc(u8, name.len * 2); // worst case: an underscore before every character, so 2x the input length always suffices
errdefer allocator.free(rc);
var utf8_name = (std.unicode.Utf8View.init(name) catch unreachable).iterator();
var target_inx: u64 = 0;
var curr_char = (try isAscii(utf8_name.nextCodepoint())).?;
target_inx = setNext(lowercase(curr_char), rc, target_inx);
var prev_char = curr_char;
if (try isAscii(utf8_name.nextCodepoint())) |ch| {
curr_char = ch;
} else {
// Single character only - we're done here
_ = setNext(0, rc, target_inx);
return rc[0..target_inx];
}
while (try isAscii(utf8_name.nextCodepoint())) |next_char| {
if (next_char == ' ') {
// a space shouldn't be happening. But if it does, it clues us
// in pretty well:
//
// MyStuff Is Awesome
// |^
// |next_char
// ^
// prev_codepoint/ascii_prev_char (and target_inx)
target_inx = setNext(lowercase(curr_char), rc, target_inx);
target_inx = setNext('_', rc, target_inx);
curr_char = (try isAscii(utf8_name.nextCodepoint())).?;
target_inx = setNext(lowercase(curr_char), rc, target_inx);
prev_char = curr_char;
curr_char = (try isAscii(utf8_name.nextCodepoint())).?;
continue;
}
if (between(curr_char, 'A', 'Z')) {
if (isAcronym(curr_char, next_char)) {
// We could be in an acronym at the start of a word. This
// is the only case where we actually need to look back at the
// previous character, and if that's the case, throw in an
// underscore
// "SAMLMySAMLAcronymThing");
if (between(prev_char, 'a', 'z'))
target_inx = setNext('_', rc, target_inx);
//we are in an acronym - don't snake, just lower
target_inx = setNext(lowercase(curr_char), rc, target_inx);
} else {
target_inx = setNext('_', rc, target_inx);
target_inx = setNext(lowercase(curr_char), rc, target_inx);
}
} else {
target_inx = setNext(curr_char, rc, target_inx);
}
prev_char = curr_char;
curr_char = next_char;
}
// work in the last codepoint - force lowercase
target_inx = setNext(lowercase(curr_char), rc, target_inx);
rc[target_inx] = 0;
return rc[0..target_inx];
}
fn isAcronym(char1: u8, char2: u8) bool {
return isAcronymChar(char1) and isAcronymChar(char2);
}
fn isAcronymChar(char: u8) bool {
return between(char, 'A', 'Z') or between(char, '0', '9');
}
fn isAscii(codepoint: ?u21) !?u8 {
if (codepoint) |cp| {
if (cp > 0xff) return error.UnicodeNotSupported;
return @truncate(u8, cp);
}
return null;
}
fn setNext(ascii: u8, slice: []u8, inx: u64) u64 {
slice[inx] = ascii;
return inx + 1;
}
fn lowercase(ascii: u8) u8 {
var lowercase_char = ascii;
if (between(ascii, 'A', 'Z'))
lowercase_char = ascii + ('a' - 'A');
return lowercase_char;
}
fn between(char: u8, from: u8, to: u8) bool {
return char >= from and char <= to;
}
test "converts from PascalCase to snake_case" {
const allocator = std.testing.allocator;
const snake_case = try fromPascalCase(allocator, "MyPascalCaseThing");
defer allocator.free(snake_case);
try expectEqualStrings("my_pascal_case_thing", snake_case);
}
test "handles from PascalCase acronyms to snake_case" {
const allocator = std.testing.allocator;
const snake_case = try fromPascalCase(allocator, "SAMLMySAMLAcronymThing");
defer allocator.free(snake_case);
try expectEqualStrings("saml_my_saml_acronym_thing", snake_case);
}
test "spaces in the name" {
const allocator = std.testing.allocator;
const snake_case = try fromPascalCase(allocator, "API Gateway");
defer allocator.free(snake_case);
try expectEqualStrings("api_gateway", snake_case);
}
test "S3" {
const allocator = std.testing.allocator;
const snake_case = try fromPascalCase(allocator, "S3");
defer allocator.free(snake_case);
try expectEqualStrings("s3", snake_case);
}
test "ec2" {
const allocator = std.testing.allocator;
const snake_case = try fromPascalCase(allocator, "EC2");
defer allocator.free(snake_case);
try expectEqualStrings("ec2", snake_case);
}
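// Exercises the single-character early-return path in fromPascalCase;
// mirrors the style of the tests above.
test "single character" {
const allocator = std.testing.allocator;
const snake_case = try fromPascalCase(allocator, "A");
defer allocator.free(snake_case);
try expectEqualStrings("a", snake_case);
}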
test "IoT 1Click Devices Service" {
const allocator = std.testing.allocator;
const snake_case = try fromPascalCase(allocator, "IoT 1Click Devices Service");
defer allocator.free(snake_case);
// NOTE: There is some debate among humans about what this should
// turn into. Should it be iot_1click_... or iot_1_click...?
try expectEqualStrings("iot_1_click_devices_service", snake_case);
} | codegen/src/snake.zig |
const Trie = @This();
const std = @import("std");
const mem = std.mem;
const leb = std.leb;
const log = std.log.scoped(.link);
const testing = std.testing;
const assert = std.debug.assert;
const Allocator = mem.Allocator;
pub const Symbol = struct {
name: []const u8,
vmaddr_offset: u64,
export_flags: u64,
};
const Edge = struct {
from: *Node,
to: *Node,
label: []const u8,
fn deinit(self: *Edge, alloc: *Allocator) void {
self.to.deinit(alloc);
alloc.destroy(self.to);
self.from = undefined;
self.to = undefined;
}
};
const Node = struct {
/// Export flags associated with this exported symbol (if any).
export_flags: ?u64 = null,
/// VM address offset wrt to the section this symbol is defined against (if any).
vmaddr_offset: ?u64 = null,
/// Offset of this node in the trie output byte stream.
trie_offset: ?usize = null,
/// List of all edges originating from this node.
edges: std.ArrayListUnmanaged(Edge) = .{},
fn deinit(self: *Node, alloc: *Allocator) void {
for (self.edges.items) |*edge| {
edge.deinit(alloc);
}
self.edges.deinit(alloc);
}
const PutResult = struct {
/// Node reached at this stage of `put` op.
node: *Node,
/// Count of newly inserted nodes at this stage of `put` op.
node_count: usize,
};
/// Inserts a new node starting from `self`.
fn put(self: *Node, alloc: *Allocator, label: []const u8, node_count: usize) !PutResult {
var curr_node_count = node_count;
// Check for match with edges from this node.
for (self.edges.items) |*edge| {
const match = mem.indexOfDiff(u8, edge.label, label) orelse return PutResult{
.node = edge.to,
.node_count = curr_node_count,
};
if (match == 0) continue;
if (match == edge.label.len) return edge.to.put(alloc, label[match..], curr_node_count);
// Found a match, need to splice up nodes.
// From: A -> B
// To: A -> C -> B
const mid = try alloc.create(Node);
mid.* = .{};
const to_label = edge.label;
const to_node = edge.to;
edge.to = mid;
edge.label = label[0..match];
curr_node_count += 1;
try mid.edges.append(alloc, .{
.from = mid,
.to = to_node,
.label = to_label[match..],
});
if (match == label.len) {
return PutResult{ .node = to_node, .node_count = curr_node_count };
} else {
return mid.put(alloc, label[match..], curr_node_count);
}
}
// Add a new node.
const node = try alloc.create(Node);
node.* = .{};
curr_node_count += 1;
try self.edges.append(alloc, .{
.from = self,
.to = node,
.label = label,
});
return PutResult{ .node = node, .node_count = curr_node_count };
}
/// This method should only be called *after* updateOffset has been called!
/// In case this is not upheld, this method will panic.
fn writeULEB128Mem(self: Node, buffer: *std.ArrayListUnmanaged(u8)) !void {
assert(self.trie_offset != null); // You need to call updateOffset first.
if (self.vmaddr_offset) |offset| {
// Terminal node info: encode export flags and vmaddr offset of this symbol.
var info_buf_len: usize = 0;
var info_buf: [@sizeOf(u64) * 2]u8 = undefined;
var info_stream = std.io.fixedBufferStream(&info_buf);
try leb.writeULEB128(info_stream.writer(), self.export_flags.?);
try leb.writeULEB128(info_stream.writer(), offset);
// Encode the size of the terminal node info.
var size_buf: [@sizeOf(u64)]u8 = undefined;
var size_stream = std.io.fixedBufferStream(&size_buf);
try leb.writeULEB128(size_stream.writer(), info_stream.pos);
// Now, write them to the output buffer.
buffer.appendSliceAssumeCapacity(size_buf[0..size_stream.pos]);
buffer.appendSliceAssumeCapacity(info_buf[0..info_stream.pos]);
} else {
// Non-terminal node is delimited by 0 byte.
buffer.appendAssumeCapacity(0);
}
// Write the number of edges (a u8, so at most 255 edges are representable).
buffer.appendAssumeCapacity(@intCast(u8, self.edges.items.len));
for (self.edges.items) |edge| {
// Write edges labels.
buffer.appendSliceAssumeCapacity(edge.label);
buffer.appendAssumeCapacity(0);
var buf: [@sizeOf(u64)]u8 = undefined;
var buf_stream = std.io.fixedBufferStream(&buf);
try leb.writeULEB128(buf_stream.writer(), edge.to.trie_offset.?);
buffer.appendSliceAssumeCapacity(buf[0..buf_stream.pos]);
}
}
const UpdateResult = struct {
/// Current size of this node in bytes.
node_size: usize,
/// True if the trie offset of this node in the output byte stream
/// would need updating; false otherwise.
updated: bool,
};
/// Updates offset of this node in the output byte stream.
fn updateOffset(self: *Node, offset: usize) UpdateResult {
var node_size: usize = 0;
if (self.vmaddr_offset) |vmaddr| {
node_size += sizeULEB128Mem(self.export_flags.?);
node_size += sizeULEB128Mem(vmaddr);
node_size += sizeULEB128Mem(node_size);
} else {
node_size += 1; // 0x0 for non-terminal nodes
}
node_size += 1; // 1 byte for edge count
for (self.edges.items) |edge| {
const next_node_offset = edge.to.trie_offset orelse 0;
node_size += edge.label.len + 1 + sizeULEB128Mem(next_node_offset);
}
const trie_offset = self.trie_offset orelse 0;
const updated = offset != trie_offset;
self.trie_offset = offset;
return .{ .node_size = node_size, .updated = updated };
}
/// Calculates number of bytes in ULEB128 encoding of value.
fn sizeULEB128Mem(value: u64) usize {
var res: usize = 0;
var v = value;
while (true) {
v = v >> 7;
res += 1;
if (v == 0) break;
}
return res;
}
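// e.g. sizeULEB128Mem(0) == 1, sizeULEB128Mem(127) == 1, and
// sizeULEB128Mem(128) == 2: each output byte carries 7 payload bits.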
};
/// Count of nodes in the trie.
/// The count is updated at every `put` call.
/// The trie always consists of at least a root node, hence
/// the count always starts at 1.
node_count: usize = 1,
/// The root node of the trie.
root: Node = .{},
/// Insert a symbol into the trie, updating the prefixes in the process.
/// This operation may change the layout of the trie by splicing edges in
/// certain circumstances.
pub fn put(self: *Trie, alloc: *Allocator, symbol: Symbol) !void {
const res = try self.root.put(alloc, symbol.name, 0);
self.node_count += res.node_count;
res.node.vmaddr_offset = symbol.vmaddr_offset;
res.node.export_flags = symbol.export_flags;
}
/// Write the trie to a buffer ULEB128 encoded.
pub fn writeULEB128Mem(self: *Trie, alloc: *Allocator, buffer: *std.ArrayListUnmanaged(u8)) !void {
var ordered_nodes: std.ArrayListUnmanaged(*Node) = .{};
defer ordered_nodes.deinit(alloc);
try ordered_nodes.ensureCapacity(alloc, self.node_count);
walkInOrder(&self.root, &ordered_nodes);
var offset: usize = 0;
var more: bool = true;
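// A node's offset is itself ULEB128-encoded in its parent's edge, so growing
// one node's encoding can shift every later node; iterate until the offsets
// reach a fixed point.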
while (more) {
offset = 0;
more = false;
for (ordered_nodes.items) |node| {
const res = node.updateOffset(offset);
offset += res.node_size;
if (res.updated) more = true;
}
}
try buffer.ensureCapacity(alloc, buffer.items.len + offset);
for (ordered_nodes.items) |node| {
try node.writeULEB128Mem(buffer);
}
}
/// Walks the trie in DFS order gathering all nodes into a linear stream of nodes.
fn walkInOrder(node: *Node, list: *std.ArrayListUnmanaged(*Node)) void {
list.appendAssumeCapacity(node);
for (node.edges.items) |*edge| {
walkInOrder(edge.to, list);
}
}
pub fn deinit(self: *Trie, alloc: *Allocator) void {
self.root.deinit(alloc);
}
test "Trie node count" {
var gpa = testing.allocator;
var trie: Trie = .{};
defer trie.deinit(gpa);
testing.expectEqual(trie.node_count, 1);
try trie.put(gpa, .{
.name = "_main",
.vmaddr_offset = 0,
.export_flags = 0,
});
testing.expectEqual(trie.node_count, 2);
// Inserting the same node shouldn't update the trie.
try trie.put(gpa, .{
.name = "_main",
.vmaddr_offset = 0,
.export_flags = 0,
});
testing.expectEqual(trie.node_count, 2);
try trie.put(gpa, .{
.name = "__mh_execute_header",
.vmaddr_offset = 0x1000,
.export_flags = 0,
});
testing.expectEqual(trie.node_count, 4);
// Inserting the same node shouldn't update the trie.
try trie.put(gpa, .{
.name = "__mh_execute_header",
.vmaddr_offset = 0x1000,
.export_flags = 0,
});
testing.expectEqual(trie.node_count, 4);
try trie.put(gpa, .{
.name = "_main",
.vmaddr_offset = 0,
.export_flags = 0,
});
testing.expectEqual(trie.node_count, 4);
}
test "Trie basic" {
var gpa = testing.allocator;
var trie: Trie = .{};
defer trie.deinit(gpa);
// root
testing.expect(trie.root.edges.items.len == 0);
// root --- _st ---> node
try trie.put(gpa, .{
.name = "_st",
.vmaddr_offset = 0,
.export_flags = 0,
});
testing.expect(trie.root.edges.items.len == 1);
testing.expect(mem.eql(u8, trie.root.edges.items[0].label, "_st"));
{
// root --- _st ---> node --- art ---> node
try trie.put(gpa, .{
.name = "_start",
.vmaddr_offset = 0,
.export_flags = 0,
});
testing.expect(trie.root.edges.items.len == 1);
const nextEdge = &trie.root.edges.items[0];
testing.expect(mem.eql(u8, nextEdge.label, "_st"));
testing.expect(nextEdge.to.edges.items.len == 1);
testing.expect(mem.eql(u8, nextEdge.to.edges.items[0].label, "art"));
}
{
// root --- _ ---> node --- st ---> node --- art ---> node
// |
// | --- main ---> node
try trie.put(gpa, .{
.name = "_main",
.vmaddr_offset = 0,
.export_flags = 0,
});
testing.expect(trie.root.edges.items.len == 1);
const nextEdge = &trie.root.edges.items[0];
testing.expect(mem.eql(u8, nextEdge.label, "_"));
testing.expect(nextEdge.to.edges.items.len == 2);
testing.expect(mem.eql(u8, nextEdge.to.edges.items[0].label, "st"));
testing.expect(mem.eql(u8, nextEdge.to.edges.items[1].label, "main"));
const nextNextEdge = &nextEdge.to.edges.items[0];
testing.expect(mem.eql(u8, nextNextEdge.to.edges.items[0].label, "art"));
}
}
test "Trie.writeULEB128Mem" {
var gpa = testing.allocator;
var trie: Trie = .{};
defer trie.deinit(gpa);
try trie.put(gpa, .{
.name = "__mh_execute_header",
.vmaddr_offset = 0,
.export_flags = 0,
});
try trie.put(gpa, .{
.name = "_main",
.vmaddr_offset = 0x1000,
.export_flags = 0,
});
var buffer: std.ArrayListUnmanaged(u8) = .{};
defer buffer.deinit(gpa);
try trie.writeULEB128Mem(gpa, &buffer);
const exp_buffer = [_]u8{
0x0,
0x1,
0x5f,
0x0,
0x5,
0x0,
0x2,
0x5f,
0x6d,
0x68,
0x5f,
0x65,
0x78,
0x65,
0x63,
0x75,
0x74,
0x65,
0x5f,
0x68,
0x65,
0x61,
0x64,
0x65,
0x72,
0x0,
0x21,
0x6d,
0x61,
0x69,
0x6e,
0x0,
0x25,
0x2,
0x0,
0x0,
0x0,
0x3,
0x0,
0x80,
0x20,
0x0,
};
testing.expect(buffer.items.len == exp_buffer.len);
testing.expect(mem.eql(u8, buffer.items, exp_buffer[0..]));
} | src/link/MachO/Trie.zig |
const haversine = @import("haversine.zig");
const std = @import("std");
const math = std.math;
const testing = std.testing;
const epsilon = 0.000001;
test "Miami, FL to New York, NY" {
const miami = haversine.LatLng{
.lat = 25.761681,
.lng = -80.191788,
};
const ny = haversine.LatLng{
.lat = 40.730610,
.lng = -73.935242,
};
const dist = try haversine.calculateEarthDistance(miami, ny);
testing.expect(math.approxEq(f64, dist, 1761.9483035850824, epsilon));
}
test "Oxford, UK to Paris, France" {
const oxford = haversine.LatLng{
.lat = 51.752022,
.lng = -1.257677,
};
const paris = haversine.LatLng{
.lat = 48.864716,
.lng = 2.349014,
};
const dist = try haversine.calculateEarthDistance(oxford, paris);
testing.expect(math.approxEq(f64, dist, 410.59884571460276, epsilon));
}
test "Copenhagen, Denmark to Berlin, Germany" {
const copenhagen = haversine.LatLng{
.lat = 55.676098,
.lng = 12.568337,
};
const berlin = haversine.LatLng{
.lat = 52.520008,
.lng = 13.404954,
};
const dist = try haversine.calculateEarthDistance(copenhagen, berlin);
testing.expect(math.approxEq(f64, dist, 355.1490195853771, epsilon));
}
test "Tokyo, Japan to Sydney, Australia" {
const tokyo = haversine.LatLng{
.lat = 35.652832,
.lng = 139.839478,
};
const sydney = haversine.LatLng{
.lat = -33.865143,
.lng = 151.209900,
};
const dist = try haversine.calculateEarthDistance(tokyo, sydney);
testing.expect(math.approxEq(f64, dist, 7819.885147555453, epsilon));
}
test "Johannesburg, South Africa to Jakarta, Indonesia" {
const johannesburg = haversine.LatLng{
.lat = -26.195246,
.lng = 28.034088,
};
const jakarta = haversine.LatLng{
.lat = -6.200000,
.lng = 106.816666,
};
const dist = try haversine.calculateEarthDistance(johannesburg, jakarta);
testing.expect(math.approxEq(f64, dist, 8586.494573575452, epsilon));
}
test "Error on invalid radius" {
const coord1 = haversine.LatLng{
.lat = 5.0,
.lng = 5.0,
};
const coord2 = haversine.LatLng{
.lat = 0.0,
.lng = 0.0,
};
testing.expectError(error.InvalidRadius, haversine.calculateDistance(coord1, coord2, -1.0));
}
test "Invalid latitude on coordinate 1" {
const coord1 = haversine.LatLng{
.lat = -100.0,
.lng = 5.0,
};
const coord2 = haversine.LatLng{
.lat = 0.0,
.lng = 0.0,
};
testing.expectError(error.InvalidLatitude, haversine.calculateEarthDistance(coord1, coord2));
}
test "Invalid longitude on coordinate 1" {
const coord1 = haversine.LatLng{
.lat = 5.0,
.lng = 200.0,
};
const coord2 = haversine.LatLng{
.lat = 0.0,
.lng = 0.0,
};
testing.expectError(error.InvalidLongitude, haversine.calculateEarthDistance(coord1, coord2));
} | haversine_test.zig |
const util = @import("../sdf_util.zig");
pub const info: util.SdfInfo = .{
.name = "Capped Cylinder",
.data_size = @sizeOf(Data),
.function_definition = function_definition,
.enter_command_fn = util.surfaceEnterCommand(Data),
.exit_command_fn = util.surfaceExitCommand(Data, exitCommand),
.append_mat_check_fn = util.surfaceMatCheckCommand(Data),
.sphere_bound_fn = sphereBound,
};
pub const Data = struct {
start: util.math.vec3,
end: util.math.vec3,
radius: f32,
mat: usize,
};
const function_definition: []const u8 =
\\float sdCappedCylinder(vec3 p, vec3 a, vec3 b, float r){
\\ vec3 ba = b - a;
\\ vec3 pa = p - a;
\\ float baba = dot(ba,ba);
\\ float paba = dot(pa,ba);
\\ float x = length(pa*baba-ba*paba) - r * baba;
\\ float y = abs(paba - baba * .5) - baba * .5;
\\ float x2 = x*x;
\\ float y2 = y*y*baba;
\\ float d = (max(x,y)<0.)?-min(x2,y2):(((x>0.)?x2:0.)+((y>0.)?y2:0.));
\\ return sign(d)*sqrt(abs(d))/baba;
\\}
\\
;
fn exitCommand(data: *Data, enter_index: usize, cur_point_name: []const u8, allocator: util.std.mem.Allocator) []const u8 {
const format: []const u8 = "float d{d} = sdCappedCylinder({s}, vec3({d:.5},{d:.5},{d:.5}),vec3({d:.5},{d:.5},{d:.5}),{d:.5});";
return util.std.fmt.allocPrint(allocator, format, .{
enter_index,
cur_point_name,
data.start[0],
data.start[1],
data.start[2],
data.end[0],
data.end[1],
data.end[2],
data.radius,
}) catch unreachable;
}
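// e.g. for enter_index 3 and cur_point_name "p", this emits (with example data):
// float d3 = sdCappedCylinder(p, vec3(0.00000,0.00000,0.00000),vec3(0.00000,1.00000,0.00000),0.50000);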
fn sphereBound(buffer: *[]u8, bound: *util.math.sphereBound, children: []util.math.sphereBound) void {
_ = children;
const data: *Data = @ptrCast(*Data, @alignCast(@alignOf(Data), buffer.ptr));
bound.* = util.math.SphereBound.merge(
.{
.pos = data.start,
.r = data.radius,
},
.{
.pos = data.end,
.r = data.radius,
},
);
} | src/sdf/surfaces/capped_cylinder.zig |
const std = @import("std");
const utils = @import("utils.zig");
const SparseSet = @import("sparse_set.zig").SparseSet;
const Signal = @import("../signals/signal.zig").Signal;
const Sink = @import("../signals/sink.zig").Sink;
/// Stores an ArrayList of components along with a SparseSet of entities
pub fn ComponentStorage(comptime Component: type, comptime Entity: type) type {
std.debug.assert(!utils.isComptime(Component));
// empty (zero-sized) structs will not have an array created
const is_empty_struct = @sizeOf(Component) == 0;
// HACK: because this is stored behind untyped pointers, when deinit is called we get cast to a Component of some
// random non-zero-sized type. That makes is_empty_struct always false in deinit, so we can't rely on it. Instead,
// we stick a small dummy struct in the instances ArrayList so it can always be safely deallocated.
// Perhaps we should just allocate instances with a dummy allocator or the tmp allocator?
comptime var ComponentOrDummy = if (is_empty_struct) struct { dummy: u1 } else Component;
return struct {
const Self = @This();
set: *SparseSet(Entity),
instances: std.ArrayList(ComponentOrDummy),
allocator: ?std.mem.Allocator,
/// doesn't really belong here... used to denote group ownership
super: usize = 0,
safeDeinit: fn (*Self) void,
safeSwap: fn (*Self, Entity, Entity, bool) void,
safeRemoveIfContains: fn (*Self, Entity) void,
construction: Signal(Entity),
update: Signal(Entity),
destruction: Signal(Entity),
pub fn init(allocator: std.mem.Allocator) Self {
var store = Self{
.set = SparseSet(Entity).initPtr(allocator),
.instances = undefined,
.safeDeinit = struct {
fn deinit(self: *Self) void {
if (!is_empty_struct) {
self.instances.deinit();
}
}
}.deinit,
.safeSwap = struct {
fn swap(self: *Self, lhs: Entity, rhs: Entity, instances_only: bool) void {
if (!is_empty_struct) {
std.mem.swap(Component, &self.instances.items[self.set.index(lhs)], &self.instances.items[self.set.index(rhs)]);
}
if (!instances_only) self.set.swap(lhs, rhs);
}
}.swap,
.safeRemoveIfContains = struct {
fn removeIfContains(self: *Self, entity: Entity) void {
if (self.contains(entity)) {
self.remove(entity);
}
}
}.removeIfContains,
.allocator = null,
.construction = Signal(Entity).init(allocator),
.update = Signal(Entity).init(allocator),
.destruction = Signal(Entity).init(allocator),
};
if (!is_empty_struct) {
store.instances = std.ArrayList(ComponentOrDummy).init(allocator);
}
return store;
}
pub fn initPtr(allocator: std.mem.Allocator) *Self {
var store = allocator.create(Self) catch unreachable;
store.set = SparseSet(Entity).initPtr(allocator);
if (!is_empty_struct) {
store.instances = std.ArrayList(ComponentOrDummy).init(allocator);
}
store.allocator = allocator;
store.super = 0;
store.construction = Signal(Entity).init(allocator);
store.update = Signal(Entity).init(allocator);
store.destruction = Signal(Entity).init(allocator);
// since we are stored as a pointer, we need to capture this
store.safeDeinit = struct {
fn deinit(self: *Self) void {
if (!is_empty_struct) {
self.instances.deinit();
}
}
}.deinit;
store.safeSwap = struct {
fn swap(self: *Self, lhs: Entity, rhs: Entity, instances_only: bool) void {
if (!is_empty_struct) {
std.mem.swap(Component, &self.instances.items[self.set.index(lhs)], &self.instances.items[self.set.index(rhs)]);
}
if (!instances_only) self.set.swap(lhs, rhs);
}
}.swap;
store.safeRemoveIfContains = struct {
fn removeIfContains(self: *Self, entity: Entity) void {
if (self.contains(entity)) {
self.remove(entity);
}
}
}.removeIfContains;
return store;
}
pub fn deinit(self: *Self) void {
// Great care must be taken here. Because Registry stores this struct behind untyped pointers, anything touching
// the Component type would be wrong, since deinit has to cast to a random struct. As a result, is_empty_struct
// will always be false here, so we have to deinit the instances no matter what.
self.safeDeinit(self);
self.set.deinit();
self.construction.deinit();
self.update.deinit();
self.destruction.deinit();
if (self.allocator) |allocator| {
allocator.destroy(self);
}
}
pub fn onConstruct(self: *Self) Sink(Entity) {
return self.construction.sink();
}
pub fn onUpdate(self: *Self) Sink(Entity) {
return self.update.sink();
}
pub fn onDestruct(self: *Self) Sink(Entity) {
return self.destruction.sink();
}
/// Increases the capacity of a component storage
pub fn reserve(self: *Self, cap: usize) void {
self.set.reserve(cap);
if (!is_empty_struct) {
self.instances.ensureTotalCapacity(cap) catch unreachable;
}
}
/// Assigns an entity to a storage and assigns its object
pub fn add(self: *Self, entity: Entity, value: Component) void {
if (!is_empty_struct) {
_ = self.instances.append(value) catch unreachable;
}
self.set.add(entity);
self.construction.publish(entity);
}
/// Removes an entity from a storage
pub fn remove(self: *Self, entity: Entity) void {
self.destruction.publish(entity);
if (!is_empty_struct) {
_ = self.instances.swapRemove(self.set.index(entity));
}
self.set.remove(entity);
}
/// Checks if a view contains an entity
pub fn contains(self: Self, entity: Entity) bool {
return self.set.contains(entity);
}
pub fn removeIfContains(self: *Self, entity: Entity) void {
if (Component == u1) {
self.safeRemoveIfContains(self, entity);
} else if (self.contains(entity)) {
self.remove(entity);
}
}
pub fn len(self: Self) usize {
return self.set.len();
}
pub usingnamespace if (is_empty_struct)
struct {
/// Sort Entities according to the given comparison function. Only T == Entity is allowed. The constraint param only exists for
/// parity with non-empty Components
pub fn sort(self: Self, comptime T: type, context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool) void {
std.debug.assert(T == Entity);
self.set.sort(context, lessThan);
}
}
else
struct {
/// Direct access to the array of objects
pub fn raw(self: Self) []Component {
return self.instances.items;
}
/// Replaces the given component for an entity
pub fn replace(self: *Self, entity: Entity, value: Component) void {
self.get(entity).* = value;
self.update.publish(entity);
}
/// Returns the object associated with an entity
pub fn get(self: *Self, entity: Entity) *Component {
std.debug.assert(self.contains(entity));
return &self.instances.items[self.set.index(entity)];
}
pub fn getConst(self: *Self, entity: Entity) Component {
return self.instances.items[self.set.index(entity)];
}
/// Returns a pointer to the object associated with an entity, if any.
pub fn tryGet(self: *Self, entity: Entity) ?*Component {
return if (self.set.contains(entity)) &self.instances.items[self.set.index(entity)] else null;
}
pub fn tryGetConst(self: *Self, entity: Entity) ?Component {
return if (self.set.contains(entity)) self.instances.items[self.set.index(entity)] else null;
}
/// Sort Entities or Components according to the given comparison function. Valid types for T are Entity or Component.
pub fn sort(self: *Self, comptime T: type, length: usize, context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool) void {
std.debug.assert(T == Entity or T == Component);
// we have to perform a swap after the sort for all moved entities so we make a helper struct for that. In the
// case of a Component sort we also wrap that into the struct so we can get the Component data to pass to the
// lessThan method passed in.
if (T == Entity) {
const SortContext = struct {
storage: *Self,
pub fn swap(this: @This(), a: Entity, b: Entity) void {
this.storage.safeSwap(this.storage, a, b, true);
}
};
const swap_context = SortContext{ .storage = self };
self.set.arrange(length, context, lessThan, swap_context);
} else {
const SortContext = struct {
storage: *Self,
wrapped_context: @TypeOf(context),
lessThan: fn (@TypeOf(context), T, T) bool,
fn sort(this: @This(), a: Entity, b: Entity) bool {
const real_a = this.storage.getConst(a);
const real_b = this.storage.getConst(b);
return this.lessThan(this.wrapped_context, real_a, real_b);
}
pub fn swap(this: @This(), a: Entity, b: Entity) void {
this.storage.safeSwap(this.storage, a, b, true);
}
};
const swap_context = SortContext{ .storage = self, .wrapped_context = context, .lessThan = lessThan };
self.set.arrange(length, swap_context, SortContext.sort, swap_context);
}
}
};
/// Direct access to the array of entities
pub fn data(self: Self) []const Entity {
return self.set.data();
}
/// Direct access to the array of entities
pub fn dataPtr(self: Self) *const []Entity {
return self.set.dataPtr();
}
/// Swaps entities and objects in the internal packed arrays
pub fn swap(self: *Self, lhs: Entity, rhs: Entity) void {
self.safeSwap(self, lhs, rhs, false);
}
pub fn clear(self: *Self) void {
if (!is_empty_struct) {
self.instances.items.len = 0;
}
self.set.clear();
}
};
}
test "add/try-get/remove/clear" {
var store = ComponentStorage(f32, u32).init(std.testing.allocator);
defer store.deinit();
store.add(3, 66.45);
try std.testing.expectEqual(store.tryGetConst(3).?, 66.45);
if (store.tryGet(3)) |found| {
try std.testing.expectEqual(@as(f32, 66.45), found.*);
}
store.remove(3);
var val_null = store.tryGet(3);
try std.testing.expectEqual(val_null, null);
store.clear();
}
test "add/get/remove" {
var store = ComponentStorage(f32, u32).init(std.testing.allocator);
defer store.deinit();
store.add(3, 66.45);
if (store.tryGet(3)) |found| try std.testing.expectEqual(@as(f32, 66.45), found.*);
try std.testing.expectEqual(store.tryGetConst(3).?, 66.45);
store.remove(3);
try std.testing.expectEqual(store.tryGet(3), null);
}
test "iterate" {
var store = ComponentStorage(f32, u32).initPtr(std.testing.allocator);
defer store.deinit();
store.add(3, 66.45);
store.add(5, 66.45);
store.add(7, 66.45);
for (store.data()) |entity, i| {
if (i == 0) {
try std.testing.expectEqual(entity, 3);
}
if (i == 1) {
try std.testing.expectEqual(entity, 5);
}
if (i == 2) {
try std.testing.expectEqual(entity, 7);
}
}
}
test "empty component" {
const Empty = struct {};
var store = ComponentStorage(Empty, u32).initPtr(std.testing.allocator);
defer store.deinit();
store.add(3, Empty{});
store.remove(3);
}
fn construct(e: u32) void {
std.debug.assert(e == 3);
}
fn update(e: u32) void {
std.debug.assert(e == 3);
}
fn destruct(e: u32) void {
std.debug.assert(e == 3);
}
test "signals" {
var store = ComponentStorage(f32, u32).init(std.testing.allocator);
defer store.deinit();
store.onConstruct().connect(construct);
store.onUpdate().connect(update);
store.onDestruct().connect(destruct);
store.add(3, 66.45);
store.replace(3, 45.64);
store.remove(3);
store.onConstruct().disconnect(construct);
store.onUpdate().disconnect(update);
store.onDestruct().disconnect(destruct);
store.add(4, 66.45);
store.replace(4, 45.64);
store.remove(4);
}
test "sort empty component" {
const Empty = struct {};
var store = ComponentStorage(Empty, u32).initPtr(std.testing.allocator);
defer store.deinit();
store.add(1, Empty{});
store.add(2, Empty{});
store.add(0, Empty{});
const asc_u32 = comptime std.sort.asc(u32);
store.sort(u32, {}, asc_u32);
for (store.data()) |e, i| {
try std.testing.expectEqual(@intCast(u32, i), e);
}
const desc_u32 = comptime std.sort.desc(u32);
store.sort(u32, {}, desc_u32);
var counter: u32 = 2;
for (store.data()) |e| {
try std.testing.expectEqual(counter, e);
if (counter > 0) counter -= 1;
}
}
test "sort by entity" {
var store = ComponentStorage(f32, u32).initPtr(std.testing.allocator);
defer store.deinit();
store.add(22, @as(f32, 2.2));
store.add(11, @as(f32, 1.1));
store.add(33, @as(f32, 3.3));
const SortContext = struct {
store: *ComponentStorage(f32, u32),
fn sort(this: @This(), a: u32, b: u32) bool {
const real_a = this.store.getConst(a);
const real_b = this.store.getConst(b);
return real_a > real_b;
}
};
const context = SortContext{ .store = store };
store.sort(u32, store.len(), context, SortContext.sort);
var compare: f32 = 5;
for (store.raw()) |val| {
try std.testing.expect(compare > val);
compare = val;
}
}
test "sort by component" {
var store = ComponentStorage(f32, u32).initPtr(std.testing.allocator);
defer store.deinit();
store.add(22, @as(f32, 2.2));
store.add(11, @as(f32, 1.1));
store.add(33, @as(f32, 3.3));
const desc_f32 = comptime std.sort.desc(f32);
store.sort(f32, store.len(), {}, desc_f32);
var compare: f32 = 5;
for (store.raw()) |val| {
try std.testing.expect(compare > val);
compare = val;
}
} | src/ecs/component_storage.zig |
const std = @import("std");
const print = std.debug.print;
pub fn main() anyerror!void {
const allocator = std.heap.page_allocator;
const input: []const u8 = "cqjxjnds";
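// AoC 2015 day 11: keep incrementing the password; it is valid once it has an
// increasing straight of three letters, none of 'i'/'o'/'l', and at least two
// non-overlapping pairs. The first two valid passwords answer parts 1 and 2.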
var current: []u8 = try allocator.alloc(u8, 8);
std.mem.copy(u8, current, input);
var password_count: usize = 0;
outer: while (true) {
var prev_letter: ?u8 = null;
var is_increasing_straight_2 = false;
var has_increasing_straight_3 = false;
var in_pair = false;
var pair_count: usize = 0;
for (current) |c, i| {
if (c == 'i' or c == 'o' or c == 'l') {
next(current[0 .. i + 1]);
if (i < current.len - 1) {
for (current[i + 1 ..]) |*x| {
x.* = 'a';
}
}
continue :outer;
}
if (prev_letter) |p| {
if (p == c and !in_pair) {
in_pair = true;
pair_count += 1;
} else {
in_pair = false;
}
if (p + 1 == c) {
if (is_increasing_straight_2) {
has_increasing_straight_3 = true;
} else {
is_increasing_straight_2 = true;
}
} else {
is_increasing_straight_2 = false;
}
}
prev_letter = c;
}
if (has_increasing_straight_3 and pair_count >= 2) {
password_count += 1;
if (password_count == 1) {
print("Part 1: {s}\n", .{current});
} else if (password_count == 2) {
print("Part 2: {s}\n", .{current});
return;
}
next(current);
} else {
next(current);
}
}
}
fn next(input: []u8) void {
// Increment the password like a base-26 number: walk from the last letter,
// carrying 'z' -> 'a' leftward until a letter can simply be bumped.
var i: usize = input.len;
while (i > 0) {
i -= 1;
if (input[i] == 'z') {
input[i] = 'a';
} else {
input[i] += 1;
return;
}
}
} | src/day11.zig
const std = @import("std");
const tools = @import("tools");
const with_trace = false;
const assert = std.debug.assert;
fn trace(comptime fmt: []const u8, args: anytype) void {
if (with_trace) std.debug.print(fmt, args);
}
const Vec2 = struct {
x: i32,
y: i32,
};
const Segment = struct {
o: Vec2,
d: Vec2,
l: i32,
L: i32,
};
fn parse_segments(insns: []const u8, pool: []Segment) []Segment {
var segments: u32 = 0;
var p = Vec2{ .x = 0, .y = 0 };
var L: i32 = 0;
var i: usize = 0;
while (i < insns.len) {
var dir = insns[i];
i += 1;
if (dir == ',') {
dir = insns[i];
i += 1;
}
var len: i32 = 0;
while (i < insns.len and insns[i] != ',') : (i += 1) {
len = len * 10 + @intCast(i32, insns[i] - '0');
}
var d = Vec2{ .x = 0, .y = 0 };
switch (dir) {
'L' => d.x = -1,
'R' => d.x = 1,
'U' => d.y = -1,
'D' => d.y = 1,
else => unreachable,
}
pool[segments] = Segment{
.o = p,
.d = d,
.l = len,
.L = L,
};
segments += 1;
L += len;
p.x += d.x * len;
p.y += d.y * len;
}
return pool[0..segments];
}
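// e.g. "R8,U5" parses into two segments:
// { o = (0,0), d = (1,0), l = 8, L = 0 }
// { o = (8,0), d = (0,-1), l = 5, L = 8 }
// where L is the total wire length walked before the segment (used for the part 2 score).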
fn distance(p: Vec2) u32 {
return (if (p.x < 0) @intCast(u32, -p.x) else @intCast(u32, p.x)) + (if (p.y < 0) @intCast(u32, -p.y) else @intCast(u32, p.y));
}
const Intersec = struct {
dist: u32,
score: i32,
};
fn intersect(s1: *const Segment, s2: *const Segment) ?Intersec {
var isec: ?Intersec = null;
var l: i32 = 0;
while (l < s2.l) : (l += 1) {
const delta = Vec2{
.x = s2.o.x + s2.d.x * l - s1.o.x,
.y = s2.o.y + s2.d.y * l - s1.o.y,
};
const d = delta.x * s1.d.x + delta.y * s1.d.y;
if (d >= 0 and d < s1.l and d * s1.d.x == delta.x and d * s1.d.y == delta.y) {
const p = Vec2{ .x = d * s1.d.x + s1.o.x, .y = d * s1.d.y + s1.o.y };
trace("isec ({},{}) S1=[({},{})+{} * ({},{}), {}] S2=[({},{})+{} * ({},{}), {}]\n", .{
p.x, p.y,
s1.o.x, s1.o.y,
s1.l, s1.d.x,
s1.d.y, s1.L,
s2.o.x, s2.o.y,
s2.l, s2.d.x,
s2.d.y, s2.L,
});
if (p.x == 0 and p.y == 0)
continue;
const sc = s1.L + s2.L + d + l;
const dist = distance(p);
if (isec == null) {
isec = Intersec{ .dist = dist, .score = sc };
} else {
if (sc < isec.?.score)
isec.?.score = sc;
if (dist < isec.?.dist)
isec.?.dist = dist;
}
}
}
return isec;
}
fn intersects(segs1: []const Segment, segs2: []const Segment) ?Intersec {
var isec: ?Intersec = null;
for (segs2) |*sg2| {
for (segs1) |*sg1| {
if (intersect(sg1, sg2)) |it| {
if (isec == null) {
isec = it;
} else {
if (it.score < isec.?.score)
isec.?.score = it.score;
if (it.dist < isec.?.dist)
isec.?.dist = it.dist;
}
}
}
}
return isec;
}
pub fn run(input: []const u8, allocator: std.mem.Allocator) ![2][]const u8 {
var pool: [1000]Segment = undefined;
var it = std.mem.split(u8, input, "\n");
const l1 = std.mem.trim(u8, it.next() orelse unreachable, &std.ascii.spaces);
const l2 = std.mem.trim(u8, it.next() orelse unreachable, &std.ascii.spaces);
const segs1 = parse_segments(l1, pool[0..]);
const segs2 = parse_segments(l2, pool[segs1.len..]);
const isec = intersects(segs1, segs2).?;
return [_][]const u8{
try std.fmt.allocPrint(allocator, "{}", .{isec.dist}),
try std.fmt.allocPrint(allocator, "{}", .{isec.score}),
};
}
pub const main = tools.defaultMain("2019/day03.txt", run); | 2019/day03.zig |
const std = @import("std");
const fs = std.fs;
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = &gpa.allocator;
const input = try fs.cwd().readFileAlloc(allocator, "data/input_02_1.txt", std.math.maxInt(usize));
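// Each input line has the form "1-3 a: abcde": a numeric range, a letter,
// and the password to validate against that policy.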
// Solution 1
{
var lines = std.mem.tokenize(input, "\n");
var count :i32 = 0;
while (lines.next()) |line| {
const trimmed = std.mem.trim(u8, line, " \n");
if (trimmed.len == 0)
continue;
var password_it = std.mem.split(line, ":");
const rule = password_it.next().?;
const pass = std.mem.trim(u8, password_it.next().?, " ");
// Parse the rule
var rule_it = std.mem.split(rule, " ");
const limits = rule_it.next().?;
const char = rule_it.next().?[0];
// Parse the limit
var limit_it = std.mem.split(limits, "-");
const min = try std.fmt.parseInt(i32, limit_it.next().?, 10);
const max = try std.fmt.parseInt(i32, limit_it.next().?, 10);
var ccount :i32 = 0;
for (pass) |c| {
if (c == char)
ccount += 1;
}
if (ccount >= min and ccount <= max) count += 1;
}
std.debug.print("Day 02 - solution 1: {}\n", .{count});
}
// Solution 2
{
var lines = std.mem.tokenize(input, "\n");
var count :i32 = 0;
while (lines.next()) |line| {
const trimmed = std.mem.trim(u8, line, " \n");
if (trimmed.len == 0)
continue;
var password_it = std.mem.split(line, ":");
const rule = password_it.next().?;
const pass = std.mem.trim(u8, password_it.next().?, " ");
// Parse the rule
var rule_it = std.mem.split(rule, " ");
const positions = rule_it.next().?;
const char = rule_it.next().?[0];
// Parse the limit
var position_it = std.mem.split(positions, "-");
const p0 = (try std.fmt.parseInt(usize, position_it.next().?, 10)) - 1;
const p1 = (try std.fmt.parseInt(usize, position_it.next().?, 10)) - 1;
if (pass.len <= p0 or pass.len <= p1)
continue;
var cfound :i32 = if (pass[p0] == char) 1 else 0;
if (pass[p1] == char)
cfound += 1;
if (cfound == 1)
count += 1;
}
std.debug.print("Day 02 - solution 2: {}\n", .{count});
}
} | 2020/src/day_02.zig |
const wlr = @import("../wlroots.zig");
const std = @import("std");
const wayland = @import("wayland");
const wl = wayland.server.wl;
const zwp = wayland.server.zwp;
pub const PointerGesturesV1 = extern struct {
global: *wl.Global,
swipes: wl.list.Head(zwp.PointerGestureSwipeV1, null),
pinches: wl.list.Head(zwp.PointerGesturePinchV1, null),
holds: wl.list.Head(zwp.PointerGestureHoldV1, null),
server_destroy: wl.Listener(*wl.Server),
events: extern struct {
destroy: wl.Signal(*PointerGesturesV1),
},
data: usize,
extern fn wlr_pointer_gestures_v1_create(server: *wl.Server) ?*PointerGesturesV1;
pub fn create(server: *wl.Server) !*PointerGesturesV1 {
return wlr_pointer_gestures_v1_create(server) orelse error.OutOfMemory;
}
extern fn wlr_pointer_gestures_v1_send_swipe_begin(
pointer_gestures: *PointerGesturesV1,
seat: *wlr.Seat,
time_msec: u32,
fingers: u32,
) void;
pub const sendSwipeBegin = wlr_pointer_gestures_v1_send_swipe_begin;
extern fn wlr_pointer_gestures_v1_send_swipe_update(
pointer_gestures: *PointerGesturesV1,
seat: *wlr.Seat,
time_msec: u32,
dx: f64,
dy: f64,
) void;
pub const sendSwipeUpdate = wlr_pointer_gestures_v1_send_swipe_update;
extern fn wlr_pointer_gestures_v1_send_swipe_end(
pointer_gestures: *PointerGesturesV1,
seat: *wlr.Seat,
time_msec: u32,
cancelled: bool,
) void;
pub const sendSwipeEnd = wlr_pointer_gestures_v1_send_swipe_end;
extern fn wlr_pointer_gestures_v1_send_pinch_begin(
pointer_gestures: *PointerGesturesV1,
seat: *wlr.Seat,
time_msec: u32,
fingers: u32,
) void;
pub const sendPinchBegin = wlr_pointer_gestures_v1_send_pinch_begin;
extern fn wlr_pointer_gestures_v1_send_pinch_update(
pointer_gestures: *PointerGesturesV1,
seat: *wlr.Seat,
time_msec: u32,
dx: f64,
dy: f64,
scale: f64,
rotation: f64,
) void;
pub const sendPinchUpdate = wlr_pointer_gestures_v1_send_pinch_update;
extern fn wlr_pointer_gestures_v1_send_pinch_end(
pointer_gestures: *PointerGesturesV1,
seat: *wlr.Seat,
time_msec: u32,
cancelled: bool,
) void;
pub const sendPinchEnd = wlr_pointer_gestures_v1_send_pinch_end;
extern fn wlr_pointer_gestures_v1_send_hold_begin(
pointer_gestures: *PointerGesturesV1,
seat: *wlr.Seat,
time_msec: u32,
fingers: u32,
) void;
pub const sendHoldBegin = wlr_pointer_gestures_v1_send_hold_begin;
extern fn wlr_pointer_gestures_v1_send_hold_end(
pointer_gestures: *PointerGesturesV1,
seat: *wlr.Seat,
time_msec: u32,
cancelled: bool,
) void;
pub const sendHoldEnd = wlr_pointer_gestures_v1_send_hold_end;
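// Usage sketch: forward gesture events from the compositor's pointer handling
// to clients. The `event` field names below follow wlroots' pointer gesture
// events and are assumptions, not part of this binding:
// gestures.sendSwipeBegin(seat, event.time_msec, event.fingers);
// gestures.sendSwipeUpdate(seat, event.time_msec, event.dx, event.dy);
// gestures.sendSwipeEnd(seat, event.time_msec, false);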
}; | src/types/pointer_gestures_v1.zig |
const std = @import("std");
const glfw = @import("glfw");
const c = @cImport({
@cDefine("CIMGUI_DEFINE_ENUMS_AND_STRUCTS", "");
// @cDefine("IMGUI_IMPL_API", "extern \"C\" __declspec(dllexport)");
@cInclude("cimgui.h");
@cInclude("./cimgui_impl.h");
// @cUndef("CIMGUI_DEFINE_ENUMS_AND_STRUCTS");
// @cInclude("imgui_impl_glfw.h");
// @cInclude("imgui_impl_opengl3.h");
// @cInclude("imgui_impl_opengl3_loader.h");
});
pub fn main() !void {
try glfw.init();
defer glfw.terminate();
const stdout = std.io.getStdOut().writer();
try stdout.print("-*- zig-imgui-template -*-\n", .{});
const glsl_version = "#version 130";
try glfw.Window.hint(glfw.Window.Hint.context_version_major, 3);
try glfw.Window.hint(glfw.Window.Hint.context_version_minor, 0);
const window = try glfw.Window.create(1280, 720, "-*- zig-imgui-template -*-", null, null);
defer window.destroy();
try glfw.makeContextCurrent(window);
try glfw.swapInterval(1); // vsync
try stdout.print("imgui version: {s}\n", .{c.igGetVersion()});
var igContext = c.igCreateContext(null);
defer c.igDestroyContext(igContext);
var style = c.igGetStyle();
var cl: *[c.ImGuiCol_COUNT]c.ImVec4 = &style.*.Colors;
// -*- Solarized Light/Dark -*-
// http://www.zovirl.com/2011/07/22/solarized_cheat_sheet/
const base03 = c.ImVec4{.x=0.00,.y=0.17,.z=0.21,.w=1.00};
const base02 = c.ImVec4{.x=0.03,.y=0.21,.z=0.26,.w=1.00};
const base01 = c.ImVec4{.x=0.35,.y=0.43,.z=0.46,.w=1.00};
const base00 = c.ImVec4{.x=0.40,.y=0.48,.z=0.51,.w=1.00};
const base0 = c.ImVec4{.x=0.51,.y=0.58,.z=0.59,.w=1.00};
const base1 = c.ImVec4{.x=0.58,.y=0.63,.z=0.63,.w=1.00};
const base2 = c.ImVec4{.x=0.93,.y=0.91,.z=0.84,.w=1.00};
const base3 = c.ImVec4{.x=0.99,.y=0.96,.z=0.89,.w=1.00};
const yellow = c.ImVec4{.x=0.71,.y=0.54,.z=0.00,.w=1.00};
const orange = c.ImVec4{.x=0.80,.y=0.29,.z=0.09,.w=1.00};
const red = c.ImVec4{.x=0.86,.y=0.20,.z=0.18,.w=1.00};
const magenta = c.ImVec4{.x=0.83,.y=0.21,.z=0.51,.w=1.00};
const violet = c.ImVec4{.x=0.42,.y=0.44,.z=0.77,.w=1.00};
const blue = c.ImVec4{.x=0.15,.y=0.55,.z=0.82,.w=1.00};
const cyan = c.ImVec4{.x=0.16,.y=0.63,.z=0.60,.w=1.00};
const green = c.ImVec4{.x=0.52,.y=0.60,.z=0.00,.w=1.00};
_ = base03;
_ = base02;
_ = base01;
_ = base0;
_ = base1;
_ = yellow;
_ = orange;
_ = red;
_ = magenta;
_ = violet;
_ = blue;
_ = cyan;
_ = green;
// light:
// base 01 - emphasized content
// base 00 - body text / primary content
// base 1 - comments / secondary content
// base 2 - background highlights
// base 3 - background
cl[c.ImGuiCol_Text] = base00;
cl[c.ImGuiCol_TextDisabled] = base1;
cl[c.ImGuiCol_WindowBg] = base3;
cl[c.ImGuiCol_ChildBg] = base3;
cl[c.ImGuiCol_PopupBg] = base3;
cl[c.ImGuiCol_Border] = base2;
cl[c.ImGuiCol_BorderShadow] = c.ImVec4{.x=0.00,.y=0.00,.z=0.00,.w=0.00};
cl[c.ImGuiCol_FrameBg] = base3;
cl[c.ImGuiCol_FrameBgHovered] = base3;
cl[c.ImGuiCol_FrameBgActive] = base3;
cl[c.ImGuiCol_TitleBg] = base2;
cl[c.ImGuiCol_TitleBgActive] = base2;
cl[c.ImGuiCol_TitleBgCollapsed] = base3;
cl[c.ImGuiCol_MenuBarBg] = base2;
cl[c.ImGuiCol_ScrollbarBg] = c.ImVec4{.x=0.98,.y=0.98,.z=0.98,.w=0.53};
cl[c.ImGuiCol_ScrollbarGrab] = c.ImVec4{.x=0.69,.y=0.69,.z=0.69,.w=0.80};
cl[c.ImGuiCol_ScrollbarGrabHovered] = c.ImVec4{.x=0.49,.y=0.49,.z=0.49,.w=0.80};
cl[c.ImGuiCol_ScrollbarGrabActive] = c.ImVec4{.x=0.49,.y=0.49,.z=0.49,.w=1.00};
cl[c.ImGuiCol_CheckMark] = c.ImVec4{.x=0.26,.y=0.59,.z=0.98,.w=1.00};
cl[c.ImGuiCol_SliderGrab] = c.ImVec4{.x=0.26,.y=0.59,.z=0.98,.w=0.78};
cl[c.ImGuiCol_SliderGrabActive] = c.ImVec4{.x=0.46,.y=0.54,.z=0.80,.w=0.60};
cl[c.ImGuiCol_Button] = c.ImVec4{.x=0.26,.y=0.59,.z=0.98,.w=0.40};
cl[c.ImGuiCol_ButtonHovered] = c.ImVec4{.x=0.26,.y=0.59,.z=0.98,.w=1.00};
cl[c.ImGuiCol_ButtonActive] = c.ImVec4{.x=0.06,.y=0.53,.z=0.98,.w=1.00};
cl[c.ImGuiCol_Header] = c.ImVec4{.x=0.26,.y=0.59,.z=0.98,.w=0.31};
cl[c.ImGuiCol_HeaderHovered] = c.ImVec4{.x=0.26,.y=0.59,.z=0.98,.w=0.80};
cl[c.ImGuiCol_HeaderActive] = c.ImVec4{.x=0.26,.y=0.59,.z=0.98,.w=1.00};
cl[c.ImGuiCol_Separator] = c.ImVec4{.x=0.39,.y=0.39,.z=0.39,.w=0.62};
cl[c.ImGuiCol_SeparatorHovered] = c.ImVec4{.x=0.14,.y=0.44,.z=0.80,.w=0.78};
cl[c.ImGuiCol_SeparatorActive] = c.ImVec4{.x=0.14,.y=0.44,.z=0.80,.w=1.00};
cl[c.ImGuiCol_ResizeGrip] = c.ImVec4{.x=0.35,.y=0.35,.z=0.35,.w=0.17};
cl[c.ImGuiCol_ResizeGripHovered] = c.ImVec4{.x=0.26,.y=0.59,.z=0.98,.w=0.67};
cl[c.ImGuiCol_ResizeGripActive] = c.ImVec4{.x=0.26,.y=0.59,.z=0.98,.w=0.95};
cl[c.ImGuiCol_Tab] = c.ImVec4{.x=0.76,.y=0.80,.z=0.84,.w=0.93};
cl[c.ImGuiCol_TabHovered] = c.ImVec4{.x=0.26,.y=0.59,.z=0.98,.w=0.80};
cl[c.ImGuiCol_TabActive] = c.ImVec4{.x=0.60,.y=0.73,.z=0.88,.w=1.00};
cl[c.ImGuiCol_TabUnfocused] = c.ImVec4{.x=0.92,.y=0.93,.z=0.94,.w=0.99};
cl[c.ImGuiCol_TabUnfocusedActive] = c.ImVec4{.x=0.74,.y=0.82,.z=0.91,.w=1.00};
cl[c.ImGuiCol_PlotLines] = c.ImVec4{.x=0.39,.y=0.39,.z=0.39,.w=1.00};
cl[c.ImGuiCol_PlotLinesHovered] = c.ImVec4{.x=1.00,.y=0.43,.z=0.35,.w=1.00};
cl[c.ImGuiCol_PlotHistogram] = c.ImVec4{.x=0.90,.y=0.70,.z=0.00,.w=1.00};
cl[c.ImGuiCol_PlotHistogramHovered] = c.ImVec4{.x=1.00,.y=0.45,.z=0.00,.w=1.00};
cl[c.ImGuiCol_TableHeaderBg] = c.ImVec4{.x=0.78,.y=0.87,.z=0.98,.w=1.00};
cl[c.ImGuiCol_TableBorderStrong] = c.ImVec4{.x=0.57,.y=0.57,.z=0.64,.w=1.00};
cl[c.ImGuiCol_TableBorderLight] = c.ImVec4{.x=0.68,.y=0.68,.z=0.74,.w=1.00};
cl[c.ImGuiCol_TableRowBg] = c.ImVec4{.x=0.00,.y=0.00,.z=0.00,.w=0.00};
cl[c.ImGuiCol_TableRowBgAlt] = c.ImVec4{.x=0.30,.y=0.30,.z=0.30,.w=0.09};
cl[c.ImGuiCol_TextSelectedBg] = c.ImVec4{.x=0.26,.y=0.59,.z=0.98,.w=0.35};
cl[c.ImGuiCol_DragDropTarget] = c.ImVec4{.x=0.26,.y=0.59,.z=0.98,.w=0.95};
cl[c.ImGuiCol_NavHighlight] = c.ImVec4{.x=0.26,.y=0.59,.z=0.98,.w=0.80};
cl[c.ImGuiCol_NavWindowingHighlight]= c.ImVec4{.x=0.70,.y=0.70,.z=0.70,.w=0.70};
cl[c.ImGuiCol_NavWindowingDimBg] = c.ImVec4{.x=0.20,.y=0.20,.z=0.20,.w=0.20};
cl[c.ImGuiCol_ModalWindowDimBg] = c.ImVec4{.x=0.20,.y=0.20,.z=0.20,.w=0.35};
var io: *c.ImGuiIO = c.igGetIO();
var text_pixels: [*c]u8 = undefined;
var text_w: i32 = undefined;
var text_h: i32 = undefined;
c.ImFontAtlas_GetTexDataAsRGBA32(io.Fonts, &text_pixels, &text_w, &text_h, null);
_ = c.ImGui_ImplGlfw_InitForOpenGL(@ptrCast(*c.GLFWwindow, window.handle), true);
_ = c.ImGui_ImplOpenGL3_Init(glsl_version);
var font = c.ImFontAtlas_AddFontFromFileTTF(io.Fonts, "res/font/CascadiaMonoPL.ttf", 15.0, null, c.ImFontAtlas_GetGlyphRangesDefault(io.Fonts));
_ = c.ImFontAtlas_Build(io.Fonts);
var display_size = c.ImVec2{
.x = 1280,
.y = 720,
};
io.DisplaySize = display_size;
io.DeltaTime = 1.0 / 60.0;
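    // These are only initial values: the GLFW backend's NewFrame call below
    // refreshes DisplaySize and DeltaTime from the real window every frame.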
var show_demo_window = true;
var run = true;
while (!window.shouldClose() and run) {
try glfw.pollEvents();
var optional_action: ?glfw.Action = window.getKey(glfw.Key.escape) catch null;
if (optional_action) |action| {
if (action == glfw.Action.press) {
run = false;
}
}
c.ImGui_ImplOpenGL3_NewFrame();
c.ImGui_ImplGlfw_NewFrame();
c.igNewFrame();
c.igPushFont(font);
c.igShowDemoWindow(&show_demo_window);
{
_ = c.igBegin("colors", null, 0);
const size = c.ImVec2{.x=50, .y=24};
c.igPushStyleColor_Vec4(c.ImGuiCol_ButtonHovered, base3);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, base03);
_ = c.igButton("base03", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, base02);
_ = c.igButton("base02", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, base01);
_ = c.igButton("base01", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, base00);
_ = c.igButton("base00", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, base0);
_ = c.igButton("base0", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, base1);
_ = c.igButton("base1", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, base2);
_ = c.igButton("base2", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, base3);
_ = c.igButton("base3", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, yellow);
_ = c.igButton("yellow", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, orange);
_ = c.igButton("orange", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, red);
_ = c.igButton("red", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, magenta);
_ = c.igButton("magenta", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, violet);
_ = c.igButton("violet", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, blue);
_ = c.igButton("blue", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, cyan);
_ = c.igButton("cyan", size);
c.igPopStyleColor(1);
c.igPushStyleColor_Vec4(c.ImGuiCol_Button, green);
_ = c.igButton("green", size);
c.igPopStyleColor(1);
c.igPopStyleColor(1);
c.igEnd();
}
c.igPopFont();
c.igRender();
const size = try window.getFramebufferSize();
c.glViewport(0, 0, @intCast(c_int, size.width), @intCast(c_int, size.height));
c.glClearColor(0.9, 0.9, 0.9, 0);
c.glClear(c.GL_COLOR_BUFFER_BIT);
c.ImGui_ImplOpenGL3_RenderDrawData(c.igGetDrawData());
try window.swapBuffers();
}
} | src/main.zig |
const std = @import("std");
const builtin = @import("builtin");
const build_options = @import("build_options");
pub const kernel = @import("kernel.zig");
const kernel_main = kernel.kernel_main;
const utils = @import("utils");
// pub const sse_enabled: bool = blk: {
// for (builtin.cpu.arch.allFeaturesList()) |feature, index_usize| {
// const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
// if (builtin.cpu.features.isEnabled(index)) {
// if (feature.name.len >= 3 and std.mem.eql(u8, feature.name[0..3], "sse")) {
// break :blk true;
// }
// }
// }
// break :blk false;
// };
pub fn panic(msg: []const u8, trace: ?*std.builtin.StackTrace) noreturn {
kernel.panic(msg, trace);
}
// TODO: Maybe refactor when struct fields get custom alignment
const Multiboot2Header = packed struct {
const magic_value: u32 = 0xe85250d6;
const architecture_x86_32: u32 = 0;
const architecture_value: u32 = architecture_x86_32;
const tag_kind_end = 0;
const tag_kind_info_request = 1;
const tag_kind_framebuffer = 5;
const tag_flag_must_understand: u16 = 0;
const tag_flag_optional: u16 = 1;
const VgaMode = if (build_options.multiboot_vbe) packed struct {
const InfoRequestTag = packed struct {
kind: u16 = tag_kind_info_request,
flags: u16 = tag_flag_must_understand,
size: u32 = @sizeOf(@This()),
tag0: u32 = 7, // VBE
};
const FramebufferTag = packed struct {
kind: u16 = tag_kind_framebuffer,
flags: u16 = tag_flag_must_understand,
size: u32 = @sizeOf(@This()),
width: u32 = 800,
height: u32 = 600,
depth: u32 = 24,
};
info_request_tag: InfoRequestTag = InfoRequestTag{},
padding0: u32 = 0,
framebuffer_tag: FramebufferTag = FramebufferTag{},
padding1: u32 = 0,
} else void;
magic: u32 = magic_value,
architecture: u32 = architecture_value,
header_length: u32 = @sizeOf(@This()),
checksum: u32 = @bitCast(u32, -(@bitCast(i32,
magic_value + architecture_value + @sizeOf(@This())))),
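    // The Multiboot2 spec requires magic + architecture + header_length +
    // checksum to sum to zero mod 2^32, hence the negated sum above.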
vga_mode: VgaMode = VgaMode{},
end_tag_kind: u16 = tag_kind_end,
end_tag_flags: u16 = tag_flag_must_understand,
end_tag_size: u32 = 8,
};
export var multiboot2_header align(8) linksection(".multiboot") =
Multiboot2Header{};
/// Real Address of multiboot_info
extern var low_multiboot_info: []u32;
/// Real Address of kernel_range_start_available
extern var low_kernel_range_start_available: u32;
/// Real Address of kernel_page_table_count
extern var low_kernel_page_table_count: u32;
/// Real Address of kernel_page_tables
extern var low_kernel_page_tables: []u32;
/// Real Address of page_directory
extern var low_page_directory: [utils.Ki(1)]u32;
/// Stack for kernel_main_wrapper(). This will be reclaimed later as a frame
/// when the memory system is initialized.
pub export var temp_stack: [utils.Ki(4)]u8
align(utils.Ki(4)) linksection(".low_bss") = undefined;
/// Stack for kernel_main()
export var stack: [utils.Ki(8)]u8 align(16) linksection(".bss") = undefined;
/// Entry Point
export fn kernel_start() linksection(".low_text") callconv(.Naked) noreturn {
@setRuntimeSafety(false);
// Save location of Multiboot2 Info
low_multiboot_info.ptr = asm volatile (
// Check for the Multiboot2 magic value in eax. It is a fatal error if
// we don't have it, but we can't report it yet so set the pointer to 0
// and we will panic later when we first try to use it.
//
\\ cmpl $0x36d76289, %%eax
\\ je passed_multiboot_check
\\ mov $0, %%ebx
\\ passed_multiboot_check:
:
[rv] "={ebx}" (-> [*]u32)
);
// This just forces Zig to include multiboot2_header, which export isn't
// doing for some reason. TODO: Report as bug?
if (multiboot2_header.magic != 0xe85250d6) {
asm volatile ("nop");
}
// Not using @newStackCall as it seems to assume there is an existing
// stack.
asm volatile (
\\ mov %[temp_stack_end], %%esp
\\ jmp kernel_main_wrapper
::
[temp_stack_end] "{eax}" (
@ptrToInt(&temp_stack[0]) + temp_stack.len)
);
unreachable;
}
extern var _VIRTUAL_OFFSET: u32;
extern var _REAL_START: u32;
extern var _REAL_END: u32;
extern var _FRAME_SIZE: u32;
fn align_down(value: u32, align_by: u32) linksection(".low_text") u32 {
return value & -%(align_by);
}
fn align_up(value: u32, align_by: u32) linksection(".low_text") u32 {
return align_down(value + align_by - 1, align_by);
}
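// Both helpers assume align_by is a power of two, e.g.
// align_down(0x1234, 0x1000) == 0x1000 and align_up(0x1234, 0x1000) == 0x2000.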
/// Set things up for kernel_main
export fn kernel_main_wrapper() linksection(".low_text") noreturn {
@setRuntimeSafety(false);
// Otherwise Zig inserts a call to a high kernel linked internal function
// called __zig_probe_stack at the start. Runtime safety doesn't do much
// good before kernel_main anyway.
// TODO: Report as bug?
const offset = @ptrToInt(&_VIRTUAL_OFFSET);
const kernel_end = @ptrToInt(&_REAL_END);
const frame_size = @ptrToInt(&_FRAME_SIZE);
const after_kernel = align_up(kernel_end, frame_size);
const pages_per_table = 1 << 10;
// If we have it, copy Multiboot information because we could accidentally
// overwrite it. Otherwise continue to defer the error.
var page_tables_start: u32 = after_kernel;
if (@ptrToInt(low_multiboot_info.ptr) != 0) {
const multiboot_info_size = low_multiboot_info[0];
low_multiboot_info.len = multiboot_info_size >> 2;
var multiboot_info_dest = after_kernel;
for (low_multiboot_info) |*ptr, i| {
@intToPtr([*]u32, multiboot_info_dest)[i] = ptr.*;
}
low_multiboot_info.ptr = @intToPtr([*]u32, multiboot_info_dest + offset);
const multiboot_info_end = multiboot_info_dest + multiboot_info_size;
page_tables_start = align_up(multiboot_info_end, frame_size);
}
// Create Page Tables for First 1MiB + Kernel + Multiboot + Page Tables
// This is an iterative process for now. Start with 1 table, see if that's
// enough. If not add another table.
var frame_count: usize = (page_tables_start / frame_size) + 1;
while (true) {
low_kernel_page_table_count =
align_up(frame_count, pages_per_table) / pages_per_table;
if (frame_count <= low_kernel_page_table_count * pages_per_table) {
break;
}
frame_count += 1;
}
const low_page_tables_end = page_tables_start +
low_kernel_page_table_count * frame_size;
// Set the start of what the memory system can work with.
low_kernel_range_start_available = low_page_tables_end;
// Get Slice for the Initial Page Tables
low_kernel_page_tables.ptr =
@intToPtr([*]u32, @intCast(usize, page_tables_start));
low_kernel_page_tables.len = pages_per_table * low_kernel_page_table_count;
// Initialize Paging Structures to Zeros
for (low_page_directory[0..]) |*ptr| {
ptr.* = 0;
}
for (low_kernel_page_tables[0..]) |*ptr| {
ptr.* = 0;
}
// Virtually Map Kernel to the Real Location and the Kernel Offset
var table_i: usize = 0;
while (table_i < low_kernel_page_table_count) {
const table_start = &low_kernel_page_tables[table_i * utils.Ki(1)];
const entry = (@ptrToInt(table_start) & 0xFFFFF000) | 1;
low_page_directory[table_i] = entry;
low_page_directory[(offset >> 22) + table_i] = entry; // Div by 4MiB
table_i += 1;
}
for (low_kernel_page_tables[0..frame_count]) |*ptr, i| {
ptr.* = i * utils.Ki(4) + 1;
}
low_kernel_page_tables[0] = 0;
// Translate for high mode
low_kernel_page_tables.ptr =
@intToPtr([*]u32, @ptrToInt(low_kernel_page_tables.ptr) + offset);
// Use that Paging Scheme
asm volatile (
\\ // Set Page Directory
\\ mov $low_page_directory, %%eax
\\ mov %%eax, %%cr3
\\ // Enable Paging
\\ mov %%cr0, %%eax
\\ or $0x80000001, %%eax
\\ mov %%eax, %%cr0
:::
"eax"
);
// Zig 0.6 will try to use SSE in normal generated code, at least while
// setting an array to undefined in debug mode. Enable SSE to allow that to
// work.
// This also allows us to explicitly take advantage of it.
// Based on the initialization code in https://wiki.osdev.org/SSE
// TODO: Disabled for now in build.zig because we need to support saving
// and restoring SSE registers first.
// if (sse_enabled) {
// asm volatile (
// \\ mov %%cr0, %%eax
// \\ and $0xFFFB, %%ax
// \\ or $0x0002, %%ax
// \\ mov %%eax, %%cr0
// \\ mov %%cr4, %%eax
// \\ or $0x0600, %%ax
// \\ mov %%eax, %%cr4
// :::
// "eax"
// );
// }
// Start the generic main function, jumping to high memory kernel at the
// same time.
asm volatile (
\\mov %[stack_end], %%esp
::
[stack_end] "{eax}" (
@ptrToInt(&stack[0]) + stack.len)
);
kernel_main();
unreachable;
} | kernel/kernel_start_x86_32.zig |
const std = @import("std");
const ArrayList = std.ArrayList;
pub const Value = union(enum) {
Uint8: u8,
Uint16: u16,
Uint32: u32,
Uint64: u64,
Int8: i8,
Int16: i16,
Int32: i32,
Int64: i64,
Float32: f32,
Float64: f64,
Null,
Bool: bool,
String: []const u8,
Array: []const Value,
Map: *const std.StringHashMap(Value),
};
pub fn toVal(thing: anytype, comptime T_opt: ?type) Value {
const T = T_opt orelse @TypeOf(thing);
return switch (T) {
u8 => Value{ .Uint8 = thing },
u16 => Value{ .Uint16 = thing },
u32 => Value{ .Uint32 = thing },
u64 => Value{ .Uint64 = thing },
i8 => Value{ .Int8 = thing },
i16 => Value{ .Int16 = thing },
i32 => Value{ .Int32 = thing },
i64 => Value{ .Int64 = thing },
f32 => Value{ .Float32 = thing },
f64 => Value{ .Float64 = thing },
bool => Value{ .Bool = thing },
[]const u8 => Value{ .String = thing },
[]const Value => Value{ .Array = thing },
std.StringHashMap(Value) => Value{ .Map = &thing },
@TypeOf(null) => Value{ .Null = {} },
else => @compileLog("Can't serialize type ", thing, " to msgpack."),
};
}
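// e.g. toVal(@as(u32, 42), null) yields Value{ .Uint32 = 42 }; passing the
// second argument forces the tag, e.g. toVal(some_bytes, []const u8) yields
// a .String (some_bytes here is a hypothetical []const u8).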
pub fn serializeAndAppend(array: *ArrayList(u8), val: Value) anyerror!void {
switch (val) {
Value.Null => try array.*.append(0xc0),
Value.Int8 => |x| {
            if (x < -32) {
                // Not a negative fixint, so emit the int8 marker first
                try array.*.append(0xd0);
            }
try array.*.append(@intCast(u8, @as(i16, x) & 0xFF));
},
Value.Int16 => |x| {
try array.*.append(0xd1);
try array.*.append(@intCast(u8, (x >> 8) & 0xFF));
try array.*.append(@intCast(u8, x & 0xFF));
},
Value.Int32 => |x| {
try array.*.append(0xd2);
try array.*.append(@intCast(u8, (x >> 24) & 0xFF));
try array.*.append(@intCast(u8, (x >> 16) & 0xFF));
try array.*.append(@intCast(u8, (x >> 8) & 0xFF));
try array.*.append(@intCast(u8, x & 0xFF));
},
Value.Int64 => |x| {
try array.*.append(0xd3);
try array.*.append(@intCast(u8, (x >> 56) & 0xFF));
try array.*.append(@intCast(u8, (x >> 48) & 0xFF));
try array.*.append(@intCast(u8, (x >> 40) & 0xFF));
try array.*.append(@intCast(u8, (x >> 32) & 0xFF));
try array.*.append(@intCast(u8, (x >> 24) & 0xFF));
try array.*.append(@intCast(u8, (x >> 16) & 0xFF));
try array.*.append(@intCast(u8, (x >> 8) & 0xFF));
try array.*.append(@intCast(u8, x & 0xFF));
},
Value.Uint8 => |x| {
try array.*.append(0xcc);
try array.*.append(@intCast(u8, @as(i16, x) & 0xFF));
},
Value.Uint16 => |x| {
try array.*.append(0xcd);
try array.*.append(@intCast(u8, (x >> 8) & 0xFF));
try array.*.append(@intCast(u8, x & 0xFF));
},
Value.Uint32 => |x| {
try array.*.append(0xce);
try array.*.append(@intCast(u8, (x >> 24) & 0xFF));
try array.*.append(@intCast(u8, (x >> 16) & 0xFF));
try array.*.append(@intCast(u8, (x >> 8) & 0xFF));
try array.*.append(@intCast(u8, x & 0xFF));
},
Value.Uint64 => |x| {
try array.*.append(0xcf);
try array.*.append(@intCast(u8, (x >> 56) & 0xFF));
try array.*.append(@intCast(u8, (x >> 48) & 0xFF));
try array.*.append(@intCast(u8, (x >> 40) & 0xFF));
try array.*.append(@intCast(u8, (x >> 32) & 0xFF));
try array.*.append(@intCast(u8, (x >> 24) & 0xFF));
try array.*.append(@intCast(u8, (x >> 16) & 0xFF));
try array.*.append(@intCast(u8, (x >> 8) & 0xFF));
try array.*.append(@intCast(u8, x & 0xFF));
},
Value.Bool => |x| {
if (x) {
try array.*.append(0xc3);
} else {
try array.*.append(0xc2);
}
},
Value.Float32 => |x| {
try array.*.append(0xca);
const x_u32 = @bitCast(u32, x);
try array.*.append(@intCast(u8, (x_u32 >> 24) & 0xFF));
try array.*.append(@intCast(u8, (x_u32 >> 16) & 0xFF));
try array.*.append(@intCast(u8, (x_u32 >> 8) & 0xFF));
try array.*.append(@intCast(u8, x_u32 & 0xFF));
},
Value.Float64 => |x| {
try array.*.append(0xcb);
const x_u64 = @bitCast(u64, x);
try array.*.append(@intCast(u8, (x_u64 >> 56) & 0xFF));
try array.*.append(@intCast(u8, (x_u64 >> 48) & 0xFF));
try array.*.append(@intCast(u8, (x_u64 >> 40) & 0xFF));
try array.*.append(@intCast(u8, (x_u64 >> 32) & 0xFF));
try array.*.append(@intCast(u8, (x_u64 >> 24) & 0xFF));
try array.*.append(@intCast(u8, (x_u64 >> 16) & 0xFF));
try array.*.append(@intCast(u8, (x_u64 >> 8) & 0xFF));
try array.*.append(@intCast(u8, x_u64 & 0xFF));
},
Value.String => |x| {
            if (x.len <= 31) {
                // fixstr (holds lengths 0-31)
try array.*.append(0xa0 | @intCast(u8, x.len));
} else if (x.len <= std.math.maxInt(u8)) {
// str8
try array.*.append(0xd9);
try array.*.append(@intCast(u8, x.len));
} else if (x.len <= std.math.maxInt(u16)) {
// str16
try array.*.append(0xda);
try array.*.append(@intCast(u8, (x.len >> 8) & 0xFF));
try array.*.append(@intCast(u8, x.len & 0xFF));
} else {
// assume str32
try array.*.append(0xdb);
try array.*.append(@intCast(u8, (x.len >> 24) & 0xFF));
try array.*.append(@intCast(u8, (x.len >> 16) & 0xFF));
try array.*.append(@intCast(u8, (x.len >> 8) & 0xFF));
try array.*.append(@intCast(u8, x.len & 0xFF));
}
for (x) |byte| {
try array.*.append(byte);
}
},
Value.Array => |xs| {
try startArray(array, xs.len);
for (xs) |x| {
try serializeAndAppend(array, x);
}
},
Value.Map => |map| {
var iterator = map.*.iterator();
try startMap(array, map.*.count());
while (iterator.next()) |x| {
try serializeAndAppend(array, toVal(x.key, null));
try serializeAndAppend(array, x.value);
}
},
}
}
fn startArray(array: *ArrayList(u8), count: u64) !void {
if (count <= 15) {
try array.*.append(0x90 | @intCast(u8, count));
} else if (count <= std.math.maxInt(u16)) {
try array.*.append(0xdc);
try array.*.append(@intCast(u8, (@intCast(u16, count) >> 8) & 0xFF));
try array.*.append(@intCast(u8, count & 0xFF));
} else {
try array.*.append(0xdd);
try array.*.append(@intCast(u8, (count >> 24) & 0xFF));
try array.*.append(@intCast(u8, (count >> 16) & 0xFF));
try array.*.append(@intCast(u8, (count >> 8) & 0xFF));
try array.*.append(@intCast(u8, count & 0xFF));
}
}
pub fn serializeList(allocator: *std.mem.Allocator, values: []const Value) !ArrayList(u8) {
var item_list = ArrayList(u8).init(allocator);
errdefer item_list.deinit();
try startArray(&item_list, values.len);
for (values) |val| {
try serializeAndAppend(&item_list, val);
}
return item_list;
}
pub fn startMap(array: *ArrayList(u8), count: u64) !void {
if (count <= 15) {
try array.*.append(0x80 | @intCast(u8, count));
} else if (count <= std.math.maxInt(u16)) {
try array.*.append(0xde);
try array.*.append(@intCast(u8, (@intCast(u16, count) >> 8) & 0xFF));
try array.*.append(@intCast(u8, count & 0xFF));
} else {
try array.*.append(0xdf);
try array.*.append(@intCast(u8, (count >> 24) & 0xFF));
try array.*.append(@intCast(u8, (count >> 16) & 0xFF));
try array.*.append(@intCast(u8, (count >> 8) & 0xFF));
try array.*.append(@intCast(u8, count & 0xFF));
}
}
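// A minimal wire-format sketch (a hypothetical test, assuming the Zig version
// this file targets, with ArrayList exposing an `items` slice): 0xcd is the
// msgpack uint16 marker, followed by the value in big-endian byte order.
test "uint16 wire format sketch" {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    var buf = ArrayList(u8).init(&arena.allocator);
    try serializeAndAppend(&buf, toVal(@as(u16, 0x1234), u16));
    std.debug.assert(buf.items.len == 3);
    std.debug.assert(buf.items[0] == 0xcd);
    std.debug.assert(buf.items[1] == 0x12);
    std.debug.assert(buf.items[2] == 0x34);
}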
fn deserializeSomething16(comptime T: type, bytes: []const u8) Value {
return toVal(@as(T, bytes[0]) << 8 |
@as(T, bytes[1]), T);
}
fn deserializeSomething32(comptime T: type, bytes: []const u8) Value {
return toVal(@as(T, bytes[0]) << 24 |
@as(T, bytes[1]) << 16 |
@as(T, bytes[2]) << 8 |
@as(T, bytes[3]), T);
}
fn deserializeSomething64(comptime T: type, bytes: []const u8) Value {
return toVal(@as(T, bytes[0]) << 56 |
@as(T, bytes[1]) << 48 |
@as(T, bytes[2]) << 40 |
@as(T, bytes[3]) << 32 |
@as(T, bytes[4]) << 24 |
@as(T, bytes[5]) << 16 |
@as(T, bytes[6]) << 8 |
@as(T, bytes[7]), T);
}
const DeserializeRet = struct {
deserialized: Value,
new_bytes: ?[]const u8,
};
// TODO(smolck): Maybe make allocator more general (or not explicitly an
// ArenaAllocator)? Needs to be an ArenaAllocator though basically, because
// otherwise there will be memory leaks from nested values not getting freed
// (or any values for that matter).
//
// Also, yes, this could probably be better than having a `deserializePrivate`
// and `deserialize` and all of this. But hey, it seems to work, so . . .
fn deserializePrivate(allocator: *std.heap.ArenaAllocator, bytes: []const u8) anyerror!DeserializeRet {
const starting_byte = bytes[0];
if ((starting_byte & 0xE0) == 0xA0) {
// Fixstr
const len = starting_byte & 0x1F;
return DeserializeRet{
.deserialized = toVal(bytes[1 .. len + 1], []const u8),
.new_bytes = bytes[len + 1 .. bytes.len],
};
} else if ((starting_byte & 0xF0) == 0x90) {
// Fixarray
const len = starting_byte & 0xF;
var values = try ArrayList(Value).initCapacity(&allocator.*.allocator, len);
var new_bytes = bytes[1..bytes.len];
var i: usize = 0;
while (i < len) : (i += 1) {
const d = try deserializePrivate(allocator, new_bytes);
try values.append(d.deserialized);
new_bytes = d.new_bytes.?;
}
return DeserializeRet{
.deserialized = toVal(values.toOwnedSlice(), []const Value),
            .new_bytes = new_bytes, // advanced past each variable-size element above
};
} else if ((starting_byte & 0xE0) == 0xE0) {
// Negative fixnum
return DeserializeRet{
.deserialized = toVal(@intCast(i8, @intCast(i16, starting_byte) - 256), i8),
.new_bytes = bytes[1..bytes.len],
};
} else if (starting_byte <= std.math.maxInt(i8)) {
// Positive fixnum
return DeserializeRet{
.deserialized = toVal(@bitCast(i8, starting_byte), i8),
.new_bytes = bytes[1..bytes.len],
};
}
switch (starting_byte) {
0xcc, // Uint8
0xd0 // Int8
=> return DeserializeRet{
.deserialized = if (starting_byte == 0xcc)
toVal(bytes[1], u8)
else
toVal(@bitCast(i8, bytes[1]), i8),
.new_bytes = bytes[2..bytes.len],
},
0xcd, // Uint16
0xd1 // Int16
=> return DeserializeRet{
.deserialized = if (starting_byte == 0xcd)
deserializeSomething16(u16, bytes[1..3])
else
deserializeSomething16(i16, bytes[1..3]),
.new_bytes = bytes[3..bytes.len],
},
0xce, // Uint32
0xd2, // Int32
=> return DeserializeRet{
.deserialized = if (starting_byte == 0xce)
deserializeSomething32(u32, bytes[1..5])
else
deserializeSomething32(i32, bytes[1..5]),
.new_bytes = bytes[5..bytes.len],
},
0xcf, // Uint64
0xd3 // Int64
=> return DeserializeRet{
.deserialized = if (starting_byte == 0xcf)
deserializeSomething64(u64, bytes[1..9])
else
deserializeSomething64(i64, bytes[1..9]),
.new_bytes = bytes[9..bytes.len],
},
0xca =>
// Float32
return DeserializeRet{
.deserialized = Value{ .Float32 = @bitCast(f32, deserializeSomething32(u32, bytes[1..5]).Uint32) },
.new_bytes = bytes[5..bytes.len],
},
0xcb =>
// Float64
return DeserializeRet{
.deserialized = Value{ .Float64 = @bitCast(f64, deserializeSomething64(u64, bytes[1..9]).Uint64) },
.new_bytes = bytes[9..bytes.len],
},
0xdc => {
// Array16
var len: usize = deserializeSomething16(u16, bytes[1..3]).Uint16;
var values = try ArrayList(Value).initCapacity(&allocator.*.allocator, len);
            var new_bytes = bytes[3..bytes.len]; // len counts elements, not bytes
var i: usize = 0;
while (i < len) : (i += 1) {
const d = try deserializePrivate(allocator, new_bytes);
try values.append(d.deserialized);
new_bytes = d.new_bytes.?;
}
return DeserializeRet{
.deserialized = toVal(values.toOwnedSlice(), []const Value),
                .new_bytes = new_bytes,
};
},
0xdd => {
// Array32
var len: usize = deserializeSomething32(u32, bytes[1..5]).Uint32;
var values = try ArrayList(Value).initCapacity(&allocator.*.allocator, len);
            var new_bytes = bytes[5..bytes.len]; // len counts elements, not bytes
var i: usize = 0;
while (i < len) : (i += 1) {
const d = try deserializePrivate(allocator, new_bytes);
try values.append(d.deserialized);
new_bytes = d.new_bytes.?;
}
return DeserializeRet{
.deserialized = toVal(values.toOwnedSlice(), []const Value),
                .new_bytes = new_bytes,
};
},
else => return DeserializeRet{ .deserialized = Value{ .Null = {} }, .new_bytes = null },
}
}
pub fn deserialize(allocator: *std.heap.ArenaAllocator, bytes: []const u8) anyerror!Value {
return (try deserializePrivate(allocator, bytes)).deserialized;
} | src/msgpack.zig |
const std = @import("std");
const builtin = @import("builtin");
pub const Parity = enum {
/// No parity bit is used
none,
/// Parity bit is `0` when an even number of bits is set in the data.
even,
/// Parity bit is `0` when an odd number of bits is set in the data.
odd,
/// Parity bit is always `1`
mark,
/// Parity bit is always `0`
space,
};
pub const StopBits = enum {
/// The length of the stop bit is 1 bit
one,
/// The length of the stop bit is 2 bits
two,
};
pub const Handshake = enum {
/// No handshake is used
none,
/// XON-XOFF software handshake is used.
software,
/// Hardware handshake with RTS/CTS is used.
hardware,
};
pub const SerialConfig = struct {
    /// Symbol rate in bits/second. Note that this rate
    /// also includes the parity and stop bits.
baud_rate: u32,
/// Parity to verify transport integrity.
parity: Parity = .none,
/// Number of stop bits after the data
stop_bits: StopBits = .one,
/// Number of data bits per word.
/// Allowed values are 5, 6, 7, 8
word_size: u4 = 8,
/// Defines the handshake protocol used.
handshake: Handshake = .none,
};
// from linux headers
const OPOST = 0o0000001;
const ISIG = 0o0000001; // Enable signals.
const ICANON = 0o0000002; // Canonical input (erase and kill processing).
const XCASE = 0o0000004;
const ECHO = 0o0000010; // Enable echo.
const ECHOE = 0o0000020; // Echo erase character as error-correcting backspace.
const ECHOK = 0o0000040; // Echo KILL.
const ECHONL = 0o0000100; // Echo NL.
const NOFLSH = 0o0000200; // Disable flush after interrupt or quit.
const TOSTOP = 0o0000400; // Send SIGTTOU for background output.
const IEXTEN = 0o0100000;
const CBAUD = 0o000000010017; //Baud speed mask (not in POSIX).
const CBAUDEX = 0o000000010000;
const CSIZE = 0o0000060;
const CS5 = 0o0000000;
const CS6 = 0o0000020;
const CS7 = 0o0000040;
const CS8 = 0o0000060;
const CSTOPB = 0o0000100;
const CREAD = 0o0000200;
const PARENB = 0o0000400;
const PARODD = 0o0001000;
const HUPCL = 0o0002000;
const CLOCAL = 0o0004000;
const CMSPAR = 0o010000000000;
const CRTSCTS = 0o020000000000;
const IGNBRK = 0o0000001;
const BRKINT = 0o0000002;
const IGNPAR = 0o0000004;
const PARMRK = 0o0000010;
const INPCK = 0o0000020;
const ISTRIP = 0o0000040;
const INLCR = 0o0000100;
const IGNCR = 0o0000200;
const ICRNL = 0o0000400;
const IUCLC = 0o0001000;
const IXON = 0o0002000;
const IXANY = 0o0004000;
const IXOFF = 0o0010000;
const IMAXBEL = 0o0020000;
const IUTF8 = 0o0040000;
const VINTR = 0;
const VQUIT = 1;
const VERASE = 2;
const VKILL = 3;
const VEOF = 4;
const VTIME = 5;
const VMIN = 6;
const VSWTC = 7;
const VSTART = 8;
const VSTOP = 9;
const VSUSP = 10;
const VEOL = 11;
const VREPRINT = 12;
const VDISCARD = 13;
const VWERASE = 14;
const VLNEXT = 15;
const VEOL2 = 16;
/// This function configures a serial port with the given config.
/// `port` is an already opened serial port, on windows these
/// are either called `\\.\COMxx\` or `COMx`, on unixes the serial
/// port is called `/dev/ttyXXX`.
pub fn configureSerialPort(port: std.fs.File, config: SerialConfig) !void {
switch (builtin.os.tag) {
.windows => {
var dcb = std.mem.zeroes(DCB);
dcb.DCBlength = @sizeOf(DCB);
if (GetCommState(port.handle, &dcb) == 0)
return error.WindowsError;
std.debug.warn("dcb = {}\n", .{dcb});
dcb.BaudRate = config.baud_rate;
dcb.fBinary = 1;
dcb.fParity = if (config.parity != .none) @as(u1, 1) else @as(u1, 0);
dcb.fOutxCtsFlow = if (config.handshake == .hardware) @as(u1, 1) else @as(u1, 0);
dcb.fOutxDsrFlow = 0;
dcb.fDtrControl = 0;
dcb.fDsrSensitivity = 0;
dcb.fTXContinueOnXoff = 0;
dcb.fOutX = if (config.handshake == .software) @as(u1, 1) else @as(u1, 0);
dcb.fInX = if (config.handshake == .software) @as(u1, 1) else @as(u1, 0);
dcb.fErrorChar = 0;
dcb.fNull = 0;
dcb.fRtsControl = if (config.handshake == .hardware) @as(u1, 1) else @as(u1, 0);
dcb.fAbortOnError = 0;
dcb.wReserved = 0;
dcb.ByteSize = config.word_size;
dcb.Parity = switch (config.parity) {
.none => @as(u8, 0),
.even => @as(u8, 2),
.odd => @as(u8, 1),
.mark => @as(u8, 3),
.space => @as(u8, 4),
};
dcb.StopBits = switch (config.stop_bits) {
.one => @as(u2, 0),
.two => @as(u2, 2),
};
dcb.XonChar = 0x11;
dcb.XoffChar = 0x13;
// dcb.ErrorChar = 0xFF;
// dcb.EofChar = 0x00;
// dcb.EvtChar = ;
dcb.wReserved1 = 0;
if (SetCommState(port.handle, &dcb) == 0)
return error.WindowsError;
},
.linux => {
var settings = try std.os.tcgetattr(port.handle);
settings.iflag = 0;
settings.oflag = 0;
settings.cflag = CREAD;
settings.lflag = 0;
settings.ispeed = 0;
settings.ospeed = 0;
// settings.iflag &= ~@as(std.os.tcflag_t, IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON);
// settings.oflag &= ~@as(std.os.tcflag_t, OPOST);
// settings.lflag &= ~@as(std.os.tcflag_t, ECHO | ECHONL | ICANON | ISIG | IEXTEN);
// settings.cflag &= ~@as(std.os.tcflag_t, CSIZE | PARENB);
// settings.cflag |= CS8;
// settings.cflag &= ~@as(std.os.tcflag_t, PARODD | CMSPAR);
switch (config.parity) {
.none => {},
.odd => settings.cflag |= PARODD,
.even => {}, // even parity is default
.mark => settings.cflag |= PARODD | CMSPAR,
.space => settings.cflag |= CMSPAR,
}
if (config.parity != .none) {
settings.iflag |= INPCK; // enable parity checking
settings.cflag |= PARENB; // enable parity generation
}
// else {
// settings.iflag &= ~@as(std.os.tcflag_t, INPCK); // disable parity checking
// settings.cflag &= ~@as(std.os.tcflag_t, PARENB); // disable parity generation
// }
switch (config.handshake) {
.none => settings.cflag |= CLOCAL,
.software => settings.iflag |= IXON | IXOFF,
.hardware => settings.cflag |= CRTSCTS,
}
switch (config.stop_bits) {
.one => {},
.two => settings.cflag |= CSTOPB,
}
switch (config.word_size) {
5 => settings.cflag |= CS5,
6 => settings.cflag |= CS6,
7 => settings.cflag |= CS7,
8 => settings.cflag |= CS8,
else => return error.UnsupportedWordSize,
}
const baudmask = try mapBaudToLinuxEnum(config.baud_rate);
settings.cflag &= ~@as(std.os.linux.tcflag_t, CBAUD);
settings.cflag |= baudmask;
settings.ispeed = baudmask;
settings.ospeed = baudmask;
settings.cc[VMIN] = 1;
settings.cc[VSTOP] = 0x13; // XOFF
settings.cc[VSTART] = 0x11; // XON
settings.cc[VTIME] = 0;
try std.os.tcsetattr(port.handle, .NOW, settings);
},
else => @compileError("unsupported OS, please implement!"),
}
}
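// A minimal usage sketch (hypothetical device path, matching the tests at the
// bottom of this file; 115200 baud, 8 data bits, no parity, one stop bit, no
// handshake):
//
//     var port = try std.fs.cwd().openFile("/dev/ttyUSB0", .{ .read = true, .write = true });
//     defer port.close();
//     try configureSerialPort(port, SerialConfig{ .baud_rate = 115200 });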
/// Flushes the serial port `port`. If `input` is set, all pending data in
/// the receive buffer is flushed, if `output` is set all pending data in
/// the send buffer is flushed.
pub fn flushSerialPort(port: std.fs.File, input: bool, output: bool) !void {
switch (builtin.os.tag) {
.windows => {
const success = if (input and output)
PurgeComm(port.handle, PURGE_TXCLEAR | PURGE_RXCLEAR)
else if (input)
PurgeComm(port.handle, PURGE_RXCLEAR)
else if (output)
PurgeComm(port.handle, PURGE_TXCLEAR)
else
@as(std.os.windows.BOOL, 0);
if (success == 0)
return error.FlushError;
},
.linux => if (input and output)
try tcflush(port.handle, TCIOFLUSH)
else if (input)
try tcflush(port.handle, TCIFLUSH)
else if (output)
try tcflush(port.handle, TCOFLUSH),
else => @compileError("unsupported OS, please implement!"),
}
}
const PURGE_RXABORT = 0x0002;
const PURGE_RXCLEAR = 0x0008;
const PURGE_TXABORT = 0x0001;
const PURGE_TXCLEAR = 0x0004;
extern "kernel32" fn PurgeComm(hFile: std.os.windows.HANDLE, dwFlags: std.os.windows.DWORD) callconv(.Stdcall) std.os.windows.BOOL;
const TCIFLUSH = 0;
const TCOFLUSH = 1;
const TCIOFLUSH = 2;
const TCFLSH = 0x540B;
fn tcflush(fd: std.os.fd_t, mode: usize) !void {
if (std.os.linux.syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), TCFLSH, mode) != 0)
return error.FlushError;
}
fn mapBaudToLinuxEnum(baudrate: usize) !std.os.linux.speed_t {
return switch (baudrate) {
// from termios.h
50 => std.os.linux.B50,
75 => std.os.linux.B75,
110 => std.os.linux.B110,
134 => std.os.linux.B134,
150 => std.os.linux.B150,
200 => std.os.linux.B200,
300 => std.os.linux.B300,
600 => std.os.linux.B600,
1200 => std.os.linux.B1200,
1800 => std.os.linux.B1800,
2400 => std.os.linux.B2400,
4800 => std.os.linux.B4800,
9600 => std.os.linux.B9600,
19200 => std.os.linux.B19200,
38400 => std.os.linux.B38400,
// from termios-baud.h
57600 => std.os.linux.B57600,
115200 => std.os.linux.B115200,
230400 => std.os.linux.B230400,
460800 => std.os.linux.B460800,
500000 => std.os.linux.B500000,
576000 => std.os.linux.B576000,
921600 => std.os.linux.B921600,
1000000 => std.os.linux.B1000000,
1152000 => std.os.linux.B1152000,
1500000 => std.os.linux.B1500000,
2000000 => std.os.linux.B2000000,
2500000 => std.os.linux.B2500000,
3000000 => std.os.linux.B3000000,
3500000 => std.os.linux.B3500000,
4000000 => std.os.linux.B4000000,
else => error.UnsupportedBaudRate,
};
}
const DCB = extern struct {
DCBlength: std.os.windows.DWORD,
BaudRate: std.os.windows.DWORD,
fBinary: std.os.windows.DWORD, // u1
fParity: std.os.windows.DWORD, // u1
fOutxCtsFlow: std.os.windows.DWORD, // u1
fOutxDsrFlow: std.os.windows.DWORD, // u1
fDtrControl: std.os.windows.DWORD, // u2
fDsrSensitivity: std.os.windows.DWORD,
fTXContinueOnXoff: std.os.windows.DWORD,
fOutX: std.os.windows.DWORD, // u1
fInX: std.os.windows.DWORD, // u1
fErrorChar: std.os.windows.DWORD, // u1
fNull: std.os.windows.DWORD, // u1
fRtsControl: std.os.windows.DWORD, // u2
fAbortOnError: std.os.windows.DWORD, // u1
fDummy2: std.os.windows.DWORD, // u17
wReserved: std.os.windows.WORD,
XonLim: std.os.windows.WORD,
XoffLim: std.os.windows.WORD,
ByteSize: std.os.windows.BYTE,
Parity: std.os.windows.BYTE,
StopBits: std.os.windows.BYTE,
XonChar: u8,
XoffChar: u8,
ErrorChar: u8,
EofChar: u8,
EvtChar: u8,
wReserved1: std.os.windows.WORD,
};
extern "kernel32" fn GetCommState(hFile: std.os.windows.HANDLE, lpDCB: *DCB) callconv(.Stdcall) std.os.windows.BOOL;
extern "kernel32" fn SetCommState(hFile: std.os.windows.HANDLE, lpDCB: *DCB) callconv(.Stdcall) std.os.windows.BOOL;
test "basic configuration test" {
var cfg = SerialConfig{
.handshake = .none,
.baud_rate = 9600,
.parity = .none,
.word_size = 8,
.stop_bits = .one,
};
var port = try std.fs.cwd().openFile(
        if (std.builtin.os.tag == .windows) "\\\\.\\COM5" else "/dev/ttyUSB0", // common names for a machine's first serial port, if one is present
.{ .read = true, .write = true },
);
defer port.close();
try configureSerialPort(port, cfg);
}
test "basic flush test" {
var port = try std.fs.cwd().openFile(
        if (std.builtin.os.tag == .windows) "\\\\.\\COM5" else "/dev/ttyUSB0", // common names for a machine's first serial port, if one is present
.{ .read = true, .write = true },
);
defer port.close();
try flushSerialPort(port, true, true);
try flushSerialPort(port, true, false);
try flushSerialPort(port, false, true);
try flushSerialPort(port, false, false);
} | serial.zig |
const std = @import("../std.zig");
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
deprecated_v8,
detectroundchange,
fixallfdivsqrt,
hard_quad_float,
hasleoncasa,
hasumacsmac,
insertnopload,
leon,
leoncyclecounter,
leonpwrpsr,
no_fmuls,
no_fsmuld,
popc,
soft_float,
soft_mul_div,
v9,
vis,
vis2,
vis3,
};
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.deprecated_v8)] = .{
.llvm_name = "deprecated-v8",
.description = "Enable deprecated V8 instructions in V9 mode",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.detectroundchange)] = .{
.llvm_name = "detectroundchange",
.description = "LEON3 erratum detection: Detects any rounding mode change request: use only the round-to-nearest rounding mode",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.fixallfdivsqrt)] = .{
.llvm_name = "fixallfdivsqrt",
.description = "LEON erratum fix: Fix FDIVS/FDIVD/FSQRTS/FSQRTD instructions with NOPs and floating-point store",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.hard_quad_float)] = .{
.llvm_name = "hard-quad-float",
.description = "Enable quad-word floating point instructions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.hasleoncasa)] = .{
.llvm_name = "hasleoncasa",
.description = "Enable CASA instruction for LEON3 and LEON4 processors",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.hasumacsmac)] = .{
.llvm_name = "hasumacsmac",
.description = "Enable UMAC and SMAC for LEON3 and LEON4 processors",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.insertnopload)] = .{
.llvm_name = "insertnopload",
.description = "LEON3 erratum fix: Insert a NOP instruction after every single-cycle load instruction when the next instruction is another load/store instruction",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.leon)] = .{
.llvm_name = "leon",
.description = "Enable LEON extensions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.leoncyclecounter)] = .{
.llvm_name = "leoncyclecounter",
.description = "Use the Leon cycle counter register",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.leonpwrpsr)] = .{
.llvm_name = "leonpwrpsr",
.description = "Enable the PWRPSR instruction",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.no_fmuls)] = .{
.llvm_name = "no-fmuls",
.description = "Disable the fmuls instruction.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.no_fsmuld)] = .{
.llvm_name = "no-fsmuld",
.description = "Disable the fsmuld instruction.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.popc)] = .{
.llvm_name = "popc",
.description = "Use the popc (population count) instruction",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.soft_float)] = .{
.llvm_name = "soft-float",
.description = "Use software emulation for floating point",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.soft_mul_div)] = .{
.llvm_name = "soft-mul-div",
.description = "Use software emulation for integer multiply and divide",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.v9)] = .{
.llvm_name = "v9",
.description = "Enable SPARC-V9 instructions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.vis)] = .{
.llvm_name = "vis",
.description = "Enable UltraSPARC Visual Instruction Set extensions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.vis2)] = .{
.llvm_name = "vis2",
.description = "Enable Visual Instruction Set extensions II",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.vis3)] = .{
.llvm_name = "vis3",
.description = "Enable Visual Instruction Set extensions III",
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
for (result) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
break :blk result;
};
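// A usage sketch (not part of the generated data; assumes this era's
// feature_set_fns also exposes a featureSetHas helper alongside featureSet):
//
//     const has_v9 = featureSetHas(cpu.ultrasparc.features, .v9);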
pub const cpu = struct {
pub const at697e = CpuModel{
.name = "at697e",
.llvm_name = "at697e",
.features = featureSet(&[_]Feature{
.insertnopload,
.leon,
}),
};
pub const at697f = CpuModel{
.name = "at697f",
.llvm_name = "at697f",
.features = featureSet(&[_]Feature{
.insertnopload,
.leon,
}),
};
pub const f934 = CpuModel{
.name = "f934",
.llvm_name = "f934",
.features = featureSet(&[_]Feature{}),
};
pub const gr712rc = CpuModel{
.name = "gr712rc",
.llvm_name = "gr712rc",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const gr740 = CpuModel{
.name = "gr740",
.llvm_name = "gr740",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.hasumacsmac,
.leon,
.leoncyclecounter,
.leonpwrpsr,
}),
};
pub const hypersparc = CpuModel{
.name = "hypersparc",
.llvm_name = "hypersparc",
.features = featureSet(&[_]Feature{}),
};
pub const leon2 = CpuModel{
.name = "leon2",
.llvm_name = "leon2",
.features = featureSet(&[_]Feature{
.leon,
}),
};
pub const leon3 = CpuModel{
.name = "leon3",
.llvm_name = "leon3",
.features = featureSet(&[_]Feature{
.hasumacsmac,
.leon,
}),
};
pub const leon4 = CpuModel{
.name = "leon4",
.llvm_name = "leon4",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.hasumacsmac,
.leon,
}),
};
pub const ma2080 = CpuModel{
.name = "ma2080",
.llvm_name = "ma2080",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const ma2085 = CpuModel{
.name = "ma2085",
.llvm_name = "ma2085",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const ma2100 = CpuModel{
.name = "ma2100",
.llvm_name = "ma2100",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const ma2150 = CpuModel{
.name = "ma2150",
.llvm_name = "ma2150",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const ma2155 = CpuModel{
.name = "ma2155",
.llvm_name = "ma2155",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const ma2450 = CpuModel{
.name = "ma2450",
.llvm_name = "ma2450",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const ma2455 = CpuModel{
.name = "ma2455",
.llvm_name = "ma2455",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const ma2480 = CpuModel{
.name = "ma2480",
.llvm_name = "ma2480",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const ma2485 = CpuModel{
.name = "ma2485",
.llvm_name = "ma2485",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const ma2x5x = CpuModel{
.name = "ma2x5x",
.llvm_name = "ma2x5x",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const ma2x8x = CpuModel{
.name = "ma2x8x",
.llvm_name = "ma2x8x",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const myriad2 = CpuModel{
.name = "myriad2",
.llvm_name = "myriad2",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const myriad2_1 = CpuModel{
.name = "myriad2_1",
.llvm_name = "myriad2.1",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const myriad2_2 = CpuModel{
.name = "myriad2_2",
.llvm_name = "myriad2.2",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const myriad2_3 = CpuModel{
.name = "myriad2_3",
.llvm_name = "myriad2.3",
.features = featureSet(&[_]Feature{
.hasleoncasa,
.leon,
}),
};
pub const niagara = CpuModel{
.name = "niagara",
.llvm_name = "niagara",
.features = featureSet(&[_]Feature{
.deprecated_v8,
.v9,
.vis,
.vis2,
}),
};
pub const niagara2 = CpuModel{
.name = "niagara2",
.llvm_name = "niagara2",
.features = featureSet(&[_]Feature{
.deprecated_v8,
.popc,
.v9,
.vis,
.vis2,
}),
};
pub const niagara3 = CpuModel{
.name = "niagara3",
.llvm_name = "niagara3",
.features = featureSet(&[_]Feature{
.deprecated_v8,
.popc,
.v9,
.vis,
.vis2,
}),
};
pub const niagara4 = CpuModel{
.name = "niagara4",
.llvm_name = "niagara4",
.features = featureSet(&[_]Feature{
.deprecated_v8,
.popc,
.v9,
.vis,
.vis2,
.vis3,
}),
};
pub const sparclet = CpuModel{
.name = "sparclet",
.llvm_name = "sparclet",
.features = featureSet(&[_]Feature{}),
};
pub const sparclite = CpuModel{
.name = "sparclite",
.llvm_name = "sparclite",
.features = featureSet(&[_]Feature{}),
};
pub const sparclite86x = CpuModel{
.name = "sparclite86x",
.llvm_name = "sparclite86x",
.features = featureSet(&[_]Feature{}),
};
pub const supersparc = CpuModel{
.name = "supersparc",
.llvm_name = "supersparc",
.features = featureSet(&[_]Feature{}),
};
pub const tsc701 = CpuModel{
.name = "tsc701",
.llvm_name = "tsc701",
.features = featureSet(&[_]Feature{}),
};
pub const ultrasparc = CpuModel{
.name = "ultrasparc",
.llvm_name = "ultrasparc",
.features = featureSet(&[_]Feature{
.deprecated_v8,
.v9,
.vis,
}),
};
pub const ultrasparc3 = CpuModel{
.name = "ultrasparc3",
.llvm_name = "ultrasparc3",
.features = featureSet(&[_]Feature{
.deprecated_v8,
.v9,
.vis,
.vis2,
}),
};
pub const ut699 = CpuModel{
.name = "ut699",
.llvm_name = "ut699",
.features = featureSet(&[_]Feature{
.fixallfdivsqrt,
.insertnopload,
.leon,
.no_fmuls,
.no_fsmuld,
}),
};
pub const v7 = CpuModel{
.name = "v7",
.llvm_name = "v7",
.features = featureSet(&[_]Feature{
.no_fsmuld,
.soft_mul_div,
}),
};
pub const v8 = CpuModel{
.name = "v8",
.llvm_name = "v8",
.features = featureSet(&[_]Feature{}),
};
pub const v9 = CpuModel{
.name = "v9",
.llvm_name = "v9",
.features = featureSet(&[_]Feature{
.v9,
}),
};
}; | lib/std/target/sparc.zig |
const config = @import("config.zig");
const std = @import("std");
const c = std.c;
const debug = std.debug;
const heap = std.heap;
const math = std.math;
const mem = std.mem;
const os = std.os;
const unicode = std.unicode;
const allocator = heap.c_allocator;
const ESC_BUF_SIZ = (128 * @sizeOf(Rune));
const ESC_ARG_SIZ = 16;
const STR_BUF_SIZ = ESC_BUF_SIZ;
const STR_ARG_SIZ = ESC_ARG_SIZ;
const CURSOR_SAVE = 0;
const CURSOR_LOAD = 1;
const CURSOR_DEFAULT = 0;
const CURSOR_WRAPNEXT = 1;
const CURSOR_ORIGIN = 2;
const SNAP_WORD = 1;
const SNAP_LINE = 2;
const MODE_WRAP = 1 << 0;
const MODE_INSERT = 1 << 1;
const MODE_ALTSCREEN = 1 << 2;
const MODE_CRLF = 1 << 3;
const MODE_ECHO = 1 << 4;
const MODE_PRINT = 1 << 5;
const MODE_UTF8 = 1 << 6;
const MODE_SIXEL = 1 << 7;
const SEL_REGULAR = 1;
const SEL_RECTANGULAR = 2;
const SEL_IDLE = 0;
const SEL_EMPTY = 1;
const SEL_READY = 2;
const ESC_START = 1 << 0;
const ESC_CSI = 1 << 1;
/// OSC, PM, APC
const ESC_STR = 1 << 2;
const ESC_ALTCHARSET = 1 << 3;
/// a final string was encountered
const ESC_STR_END = 1 << 4;
/// Enter in test mode
const ESC_TEST = 1 << 5;
const ESC_UTF8 = 1 << 6;
const ESC_DCS = 1 << 7;
const ATTR_NULL = 0;
const ATTR_BOLD = 1 << 0;
const ATTR_FAINT = 1 << 1;
const ATTR_ITALIC = 1 << 2;
const ATTR_UNDERLINE = 1 << 3;
const ATTR_BLINK = 1 << 4;
const ATTR_REVERSE = 1 << 5;
const ATTR_INVISIBLE = 1 << 6;
const ATTR_STRUCK = 1 << 7;
const ATTR_WRAP = 1 << 8;
const ATTR_WIDE = 1 << 9;
const ATTR_WDUMMY = 1 << 10;
const ATTR_BOLD_FAINT = ATTR_BOLD | ATTR_FAINT;
const Glyph = extern struct {
u: Rune,
mode: c_ushort,
fg: u32,
bg: u32,
};
const TCursor = extern struct {
attr: Glyph,
x: c_int,
y: c_int,
state: u8,
};
/// STR Escape sequence structs
/// ESC type [[ [<priv>] <arg> [;]] <mode>] ESC '\'
const STREscape = extern struct {
@"type": u8,
buf: [STR_BUF_SIZ]u8,
len: c_int,
args: [STR_ARG_SIZ]?[*]u8,
narg: c_int,
const zero = STREscape{
.@"type" = 0,
        .buf = [_]u8{0} ** STR_BUF_SIZ,
        .len = 0,
        .args = [_]?[*]u8{null} ** STR_ARG_SIZ,
.narg = 0,
};
};
const Line = [*]Glyph;
/// Internal representation of the screen
const Term = extern struct {
row: c_int,
col: c_int,
line: [*]Line,
alt: [*]Line,
dirty: [*]c_int,
c: TCursor,
ocx: c_int,
ocy: c_int,
top: c_int,
bot: c_int,
mode: c_int,
esc: c_int,
trantbl: [4]u8,
charset: c_int,
icharset: c_int,
tabs: *c_int,
};
const Point = extern struct {
x: c_int,
y: c_int,
};
const Selection = extern struct {
mode: c_int,
@"type": c_int,
snap: c_int,
/// Selection variables:
/// nb - normalized coordinates of the beginning of the selection
/// ne - normalized coordinates of the end of the selection
/// ob - original coordinates of the beginning of the selection
/// oe - original coordinates of the end of the selection
nb: Point,
ne: Point,
ob: Point,
oe: Point,
alt: c_int,
};
// This was a typedef of uint_least32_t. Is there anywhere where
// uint_least32_t != uint32_t in practice?
const Rune = u32;
const UTF_INVALID = 0xFFFD;
extern var sel: Selection;
extern var iofd: c_int;
extern var term: Term;
extern var strescseq: STREscape;
extern fn xdrawline(Line, c_int, c_int, c_int) void;
extern fn xstartdraw() c_int;
extern fn xfinishdraw() void;
extern fn xdrawcursor(c_int, c_int, Glyph, c_int, c_int, Glyph) void;
extern fn xsettitle(?[*]const u8) void;
pub export fn xmalloc(len: usize) *c_void {
return c.malloc(len).?;
}
pub export fn xrealloc(p: *c_void, len: usize) *c_void {
return c.realloc(p, len).?;
}
pub export fn xstrdup(s: [*]u8) [*]u8 {
const len = mem.len(u8, s);
const new = allocator.alloc(u8, len + 1) catch unreachable;
mem.copy(u8, new, s[0 .. len + 1]);
return new.ptr;
}
pub export fn tprinter(s: [*]const u8, len: usize) void {
if (iofd != -1) {
os.posixWrite(@intCast(i32, iofd), s[0..len]) catch {
debug.warn("Error writing to output file\n");
os.close(@intCast(i32, iofd));
iofd = -1;
};
}
}
pub export fn utf8decode(s: [*]const u8, u: *Rune, slen: usize) usize {
u.* = UTF_INVALID;
if (slen == 0)
return slen;
const len = unicode.utf8ByteSequenceLength(s[0]) catch return 0;
if (slen < len)
return 0;
u.* = unicode.utf8Decode(s[0..len]) catch return 0;
return len;
}
pub export fn utf8encode(u: Rune, s: [*]u8) usize {
const len = unicode.utf8CodepointSequenceLength(u) catch return 0;
return unicode.utf8Encode(u, s[0..len]) catch unreachable;
}
pub export fn utf8strchr(s: [*]const u8, u: Rune) ?[*]const u8 {
const len = mem.len(u8, s);
const slice = s[0..len];
    var i: usize = 0;
    while (i < len) {
        const plen = unicode.utf8ByteSequenceLength(slice[i]) catch break;
        if (len < i + plen)
            break;
        const p = unicode.utf8Decode(slice[i..][0..plen]) catch break;
        // Return a pointer to the start of the matching rune; otherwise
        // advance by this sequence's byte length.
        if (p == u)
            return slice[i..].ptr;
        i += plen;
    }
    return null;
}
pub export fn drawregion(x1: c_int, y1: c_int, x2: c_int, y2: c_int) void {
var y: c_int = y1;
while (y < y2) : (y += 1) {
const uy = @intCast(usize, y);
if (term.dirty[uy] == 0)
continue;
term.dirty[uy] = 0;
xdrawline(term.line[uy], x1, y, x2);
}
}
pub export fn tsetdirt(top: c_int, bot: c_int) void {
const ltop = limit(top, 0, term.row - 1);
const lbot = limit(bot, 0, term.row - 1);
mem.set(c_int, term.dirty[@intCast(usize, ltop)..@intCast(usize, lbot + 1)], 1);
}
pub export fn tfulldirt() void {
tsetdirt(0, term.row - 1);
}
pub export fn draw() void {
var cx = term.c.x;
if (xstartdraw() == 0)
return;
term.ocx = limit(term.ocx, 0, term.col - 1);
term.ocy = limit(term.ocy, 0, term.row - 1);
if (term.line[@intCast(usize, term.ocy)][@intCast(usize, term.ocx)].mode & ATTR_WDUMMY != 0)
term.ocx -= 1;
if (term.line[@intCast(usize, term.c.y)][@intCast(usize, cx)].mode & ATTR_WDUMMY != 0)
cx -= 1;
drawregion(0, 0, term.col, term.row);
xdrawcursor(
cx,
term.c.y,
term.line[@intCast(usize, term.c.y)][@intCast(usize, cx)],
term.ocx,
term.ocy,
term.line[@intCast(usize, term.ocy)][@intCast(usize, term.ocx)],
);
term.ocx = cx;
term.ocy = term.c.y;
xfinishdraw();
}
pub fn limit(x: var, a: @typeOf(x), b: @typeOf(x)) @typeOf(x) {
var res = math.max(x, a);
return math.min(res, b);
}
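// e.g. limit(7, 0, 4) == 4 and limit(-2, 0, 4) == 0: clamps x into [a, b].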
pub export fn resettitle() void {
xsettitle(null);
}
pub export fn redraw() void {
tfulldirt();
draw();
}
pub export fn tstrsequence(char: u8) void {
strescseq = STREscape.zero;
switch (char) {
// DCS -- Device Control String
0x90 => {
strescseq.@"type" = 'P';
term.esc |= ESC_DCS;
},
// APC -- Application Program Command
0x9f => strescseq.@"type" = '_',
// PM -- Privacy Message
0x9e => strescseq.@"type" = '^',
// OSC -- Operating System Command
0x9d => strescseq.@"type" = ']',
else => strescseq.@"type" = char,
}
term.esc |= ESC_STR;
}
pub export fn selected(x: c_int, y: c_int) c_int {
if (sel.mode == SEL_EMPTY or sel.ob.x == -1 or (sel.alt != 0) != isSet(MODE_ALTSCREEN))
return 0;
if (sel.@"type" == SEL_RECTANGULAR)
return @boolToInt(between(y, sel.nb.y, sel.ne.y) and
between(x, sel.nb.x, sel.ne.x));
return @boolToInt(between(y, sel.nb.y, sel.ne.y) and
(y != sel.nb.y or x >= sel.nb.x) and
(y != sel.ne.y or x <= sel.ne.x));
}
fn between(x: var, a: @typeOf(x), b: @typeOf(x)) bool {
return a <= x and x <= b;
}
pub export fn selsnap(x: *c_int, y: *c_int, direction: c_int) void {
switch (sel.snap) {
SNAP_WORD => {
// Snap around if the word wraps around at the end or
// beginning of a line.
var prevgp = &term.line[@intCast(usize, y.*)][@intCast(usize, x.*)];
var prevdelim = isDelim(prevgp.u);
while (true) {
var newx = x.* + direction;
var newy = y.*;
if (!between(newx, 0, term.col - 1)) {
                    newy += direction;
                    newx = @mod(newx + term.col, term.col);
                    if (!between(newy, 0, term.row - 1))
                        break;
var yt: usize = undefined;
var xt: usize = undefined;
if (direction > 0) {
yt = @intCast(usize, y.*);
xt = @intCast(usize, x.*);
} else {
yt = @intCast(usize, newy);
xt = @intCast(usize, newx);
}
if ((term.line[yt][xt].mode & ATTR_WRAP) == 0)
break;
}
if (newx >= tlinelen(newy))
break;
const gp = &term.line[@intCast(usize, newy)][@intCast(usize, newx)];
const delim = isDelim(gp.u);
if ((gp.mode & ATTR_WDUMMY == 0) and (delim != prevdelim or (delim and gp.u != prevgp.u)))
break;
x.* = newx;
y.* = newy;
prevgp = gp;
prevdelim = delim;
}
},
SNAP_LINE => {
            // Snap around if the previous line or the current one
// has set ATTR_WRAP at its end. Then the whole next or
// previous line will be selected.
x.* = if (direction < 0) 0 else term.col - 1;
if (direction < 0) {
while (y.* > 0) : (y.* += direction) {
if ((term.line[@intCast(usize, y.* - 1)][@intCast(usize, term.col - 1)].mode & ATTR_WRAP) == 0) {
break;
}
}
} else if (direction > 0) {
while (y.* < term.row - 1) : (y.* += direction) {
if ((term.line[@intCast(usize, y.*)][@intCast(usize, term.col - 1)].mode & ATTR_WRAP) == 0) {
break;
}
}
}
},
else => {},
}
}
fn isDelim(u: Rune) bool {
return utf8strchr(config.worddelimiters, u) != null;
}
pub export fn tlinelen(y: c_int) c_int {
var i = term.col;
if (term.line[@intCast(usize, y)][@intCast(usize, i - 1)].mode & ATTR_WRAP != 0)
return i;
while (i > 0 and term.line[@intCast(usize, y)][@intCast(usize, i - 1)].u == ' ')
i -= 1;
return i;
}
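// e.g. a row with unwrapped trailing blanks reports only the content width,
// while a row whose last cell has ATTR_WRAP set reports the full column
// count, so wrapped blanks survive in selections.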
pub export fn tmoveto(x: c_int, y: c_int) void {
var miny: c_int = undefined;
var maxy: c_int = undefined;
if (term.c.state & CURSOR_ORIGIN != 0) {
miny = term.top;
maxy = term.bot;
} else {
miny = 0;
maxy = term.row - 1;
}
term.c.state &= ~u8(CURSOR_WRAPNEXT);
term.c.x = limit(x, 0, term.col - 1);
term.c.y = limit(y, miny, maxy);
}
pub export fn selinit() void {
sel.mode = SEL_IDLE;
sel.snap = 0;
sel.ob.x = -1;
}
pub export fn selclear() void {
if (sel.ob.x == -1)
return;
sel.mode = SEL_IDLE;
sel.ob.x = -1;
tsetdirt(sel.nb.y, sel.ne.y);
}
pub export fn selnormalize() void {
if (sel.@"type" == SEL_REGULAR and sel.ob.y != sel.oe.y) {
sel.nb.x = if (sel.ob.y < sel.oe.y) sel.ob.x else sel.oe.x;
sel.ne.x = if (sel.ob.y < sel.oe.y) sel.oe.x else sel.ob.x;
} else {
sel.nb.x = math.min(sel.ob.x, sel.oe.x);
sel.ne.x = math.max(sel.ob.x, sel.oe.x);
}
sel.nb.y = math.min(sel.ob.y, sel.oe.y);
sel.ne.y = math.max(sel.ob.y, sel.oe.y);
selsnap(&sel.nb.x, &sel.nb.y, -1);
selsnap(&sel.ne.x, &sel.ne.y, 1);
// expand selection over line breaks
if (sel.@"type" == SEL_RECTANGULAR)
return;
const i = tlinelen(sel.nb.y);
if (i < sel.nb.x)
sel.nb.x = i;
if (tlinelen(sel.ne.y) <= sel.ne.x)
sel.ne.x = term.col - 1;
}
fn isSet(flag: var) bool {
return (term.mode & flag) != 0;
}
pub export fn selstart(col: c_int, row: c_int, snap: c_int) void {
selclear();
sel.mode = SEL_EMPTY;
sel.@"type" = SEL_REGULAR;
sel.alt = @boolToInt(isSet(MODE_ALTSCREEN));
sel.snap = snap;
sel.oe.x = col;
sel.ob.x = col;
sel.oe.y = row;
sel.ob.y = row;
selnormalize();
if (sel.snap != 0)
sel.mode = SEL_READY;
tsetdirt(sel.nb.y, sel.ne.y);
}
pub export fn selextend(col: c_int, row: c_int, kind: c_int, done: c_int) void {
if (sel.mode == SEL_IDLE)
return;
if (done != 0 and sel.mode == SEL_EMPTY) {
selclear();
return;
}
const oldey = sel.oe.y;
const oldex = sel.oe.x;
const oldsby = sel.nb.y;
const oldsey = sel.ne.y;
const oldtype = sel.@"type";
sel.oe.x = col;
sel.oe.y = row;
selnormalize();
sel.@"type" = kind;
if (oldey != sel.oe.y or oldex != sel.oe.x or oldtype != sel.@"type")
tsetdirt(math.min(sel.nb.y, oldsby), math.max(sel.ne.y, oldsey));
sel.mode = if (done != 0) u8(SEL_IDLE) else u8(SEL_READY);
}
pub export fn getsel() ?[*]u8 {
if (sel.ob.x == -1)
return null;
const bufsize = (term.col + 1) * (sel.ne.y - sel.nb.y + 1) * @sizeOf(Rune);
const str = allocator.alloc(u8, @intCast(usize, bufsize)) catch unreachable;
// append every set & selected glyph to the selection
var i: usize = 0;
var y: c_int = sel.nb.y;
while (y <= sel.ne.y) : (y += 1) {
const linelen = tlinelen(y);
if (linelen == 0) {
str[i] = '\n';
i += 1;
continue;
}
var gp: usize = undefined;
var lastx: c_int = undefined;
if (sel.@"type" == SEL_RECTANGULAR) {
gp = @intCast(usize, sel.nb.x);
lastx = sel.ne.x;
} else {
gp = if (sel.nb.y == y) @intCast(usize, sel.nb.x) else usize(0);
lastx = if (sel.ne.y == y) sel.ne.x else term.col - 1;
}
const line = term.line[@intCast(usize, y)];
var last = @intCast(usize, math.min(lastx, linelen - 1));
while (last >= gp and line[last].u == ' ')
last -= 1;
while (gp <= last) : (gp += 1) {
const p = line[gp];
if (p.mode & ATTR_WDUMMY != 0)
continue;
i += utf8encode(p.u, str[i..].ptr);
}
        // Copying and pasting of line endings is inconsistent
        // in the inconsistent terminal and GUI world.
        // The best solution seems to be to produce '\n' when
        // something is copied from st and to convert '\n' to
        // '\r' when something to be pasted is received by st.
        // FIXME: Fix the computer world.
// FIXME: Fix the computer world.
if ((y < sel.ne.y or lastx >= linelen) and line[last].mode & ATTR_WRAP == 0) {
str[i] = '\n';
i += 1;
}
}
str[i] = 0;
return str[0..].ptr;
}
pub export fn tattrset(attr: c_int) c_int {
var i: usize = 0;
while (i < @intCast(usize, term.row - 1)) : (i += 1) {
var j: usize = 0;
while (j < @intCast(usize, term.col - 1)) : (j += 1) {
if (@intCast(c_int, term.line[i][j].mode) & attr != 0)
return 1;
}
}
return 0;
}
pub export fn tcursor(mode: c_int) void {
const cur: *[2]TCursor = &struct {
var cur: [2]TCursor = undefined;
}.cur;
const alt = @boolToInt(isSet(MODE_ALTSCREEN));
if (mode == CURSOR_SAVE) {
cur[alt] = term.c;
} else if (mode == CURSOR_LOAD) {
term.c = cur[alt];
tmoveto(cur[alt].x, cur[alt].y);
}
}
pub export fn tswapscreen() void {
const tmp = term.line;
term.line = term.alt;
term.alt = tmp;
term.mode ^= MODE_ALTSCREEN;
tfulldirt();
}
pub export fn tdefutf8(ascii: u8) void {
if (ascii == 'G') {
term.mode |= MODE_UTF8;
} else if (ascii == '@') {
term.mode &= ~c_int(MODE_UTF8);
}
} | src/st.zig |
const builtin = @import("builtin");
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;
/// Returns the natural logarithm of 1 + x with greater accuracy when x is near zero.
///
/// Special Cases:
/// - log1p(+inf) = +inf
/// - log1p(+-0) = +-0
/// - log1p(-1) = -inf
/// - log1p(x) = nan if x < -1
/// - log1p(nan) = nan
pub fn log1p(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => log1p_32(x),
f64 => log1p_64(x),
else => @compileError("log1p not implemented for " ++ @typeName(T)),
};
}
fn log1p_32(x: f32) f32 {
const ln2_hi = 6.9313812256e-01;
const ln2_lo = 9.0580006145e-06;
const Lg1: f32 = 0xaaaaaa.0p-24;
const Lg2: f32 = 0xccce13.0p-25;
const Lg3: f32 = 0x91e9ee.0p-25;
const Lg4: f32 = 0xf89e26.0p-26;
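    // Argument-reduction sketch (following the usual fdlibm/musl scheme this
    // appears to be ported from): write 1 + x = 2^k * (1 + f) with
    // sqrt(2)/2 < 1 + f < sqrt(2), so that
    //     log1p(x) = k*ln2 + log(1 + f),
    // then approximate log(1 + f) via s = f/(2 + f) and the polynomial R(z)
    // with z = s^2; c carries the rounding error lost when forming f.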
const u = @bitCast(u32, x);
var ix = u;
var k: i32 = 1;
var f: f32 = undefined;
var c: f32 = undefined;
// 1 + x < sqrt(2)+
if (ix < 0x3ED413D0 or ix >> 31 != 0) {
// x <= -1.0
if (ix >= 0xBF800000) {
// log1p(-1) = -inf
if (x == -1.0) {
return -math.inf(f32);
}
// log1p(x < -1) = nan
else {
return math.nan(f32);
}
}
// |x| < 2^(-24)
if ((ix << 1) < (0x33800000 << 1)) {
// underflow if subnormal
if (ix & 0x7F800000 == 0) {
math.forceEval(x * x);
}
return x;
}
// sqrt(2) / 2- <= 1 + x < sqrt(2)+
if (ix <= 0xBE95F619) {
k = 0;
c = 0;
f = x;
}
} else if (ix >= 0x7F800000) {
return x;
}
if (k != 0) {
const uf = 1 + x;
var iu = @bitCast(u32, uf);
iu += 0x3F800000 - 0x3F3504F3;
k = @intCast(i32, iu >> 23) - 0x7F;
// correction to avoid underflow in c / u
if (k < 25) {
c = if (k >= 2) 1 - (uf - x) else x - (uf - 1);
c /= uf;
} else {
c = 0;
}
// u into [sqrt(2)/2, sqrt(2)]
iu = (iu & 0x007FFFFF) + 0x3F3504F3;
f = @bitCast(f32, iu) - 1;
}
const s = f / (2.0 + f);
const z = s * s;
const w = z * z;
const t1 = w * (Lg2 + w * Lg4);
const t2 = z * (Lg1 + w * Lg3);
const R = t2 + t1;
const hfsq = 0.5 * f * f;
const dk = @intToFloat(f32, k);
return s * (hfsq + R) + (dk * ln2_lo + c) - hfsq + f + dk * ln2_hi;
}
fn log1p_64(x: f64) f64 {
const ln2_hi: f64 = 6.93147180369123816490e-01;
const ln2_lo: f64 = 1.90821492927058770002e-10;
const Lg1: f64 = 6.666666666666735130e-01;
const Lg2: f64 = 3.999999999940941908e-01;
const Lg3: f64 = 2.857142874366239149e-01;
const Lg4: f64 = 2.222219843214978396e-01;
const Lg5: f64 = 1.818357216161805012e-01;
const Lg6: f64 = 1.531383769920937332e-01;
const Lg7: f64 = 1.479819860511658591e-01;
var ix = @bitCast(u64, x);
var hx = @intCast(u32, ix >> 32);
var k: i32 = 1;
var c: f64 = undefined;
var f: f64 = undefined;
// 1 + x < sqrt(2)
if (hx < 0x3FDA827A or hx >> 31 != 0) {
// x <= -1.0
if (hx >= 0xBFF00000) {
// log1p(-1) = -inf
if (x == -1.0) {
return -math.inf(f64);
}
// log1p(x < -1) = nan
else {
return math.nan(f64);
}
}
// |x| < 2^(-53)
if ((hx << 1) < (0x3CA00000 << 1)) {
if ((hx & 0x7FF00000) == 0) {
math.raiseUnderflow();
}
return x;
}
// sqrt(2) / 2- <= 1 + x < sqrt(2)+
if (hx <= 0xBFD2BEC4) {
k = 0;
c = 0;
f = x;
}
} else if (hx >= 0x7FF00000) {
return x;
}
if (k != 0) {
const uf = 1 + x;
const hu = @bitCast(u64, uf);
var iu = @intCast(u32, hu >> 32);
iu += 0x3FF00000 - 0x3FE6A09E;
k = @intCast(i32, iu >> 20) - 0x3FF;
// correction to avoid underflow in c / u
if (k < 54) {
c = if (k >= 2) 1 - (uf - x) else x - (uf - 1);
c /= uf;
} else {
c = 0;
}
// u into [sqrt(2)/2, sqrt(2)]
iu = (iu & 0x000FFFFF) + 0x3FE6A09E;
const iq = (@as(u64, iu) << 32) | (hu & 0xFFFFFFFF);
f = @bitCast(f64, iq) - 1;
}
const hfsq = 0.5 * f * f;
const s = f / (2.0 + f);
const z = s * s;
const w = z * z;
const t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
const t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
const R = t2 + t1;
const dk = @intToFloat(f64, k);
return s * (hfsq + R) + (dk * ln2_lo + c) - hfsq + f + dk * ln2_hi;
}
test "math.log1p" {
expect(log1p(@as(f32, 0.0)) == log1p_32(0.0));
expect(log1p(@as(f64, 0.0)) == log1p_64(0.0));
}
test "math.log1p_32" {
const epsilon = 0.000001;
expect(math.approxEq(f32, log1p_32(0.0), 0.0, epsilon));
expect(math.approxEq(f32, log1p_32(0.2), 0.182322, epsilon));
expect(math.approxEq(f32, log1p_32(0.8923), 0.637793, epsilon));
expect(math.approxEq(f32, log1p_32(1.5), 0.916291, epsilon));
expect(math.approxEq(f32, log1p_32(37.45), 3.649359, epsilon));
expect(math.approxEq(f32, log1p_32(89.123), 4.501175, epsilon));
expect(math.approxEq(f32, log1p_32(123123.234375), 11.720949, epsilon));
}
test "math.log1p_64" {
const epsilon = 0.000001;
expect(math.approxEq(f64, log1p_64(0.0), 0.0, epsilon));
expect(math.approxEq(f64, log1p_64(0.2), 0.182322, epsilon));
expect(math.approxEq(f64, log1p_64(0.8923), 0.637793, epsilon));
expect(math.approxEq(f64, log1p_64(1.5), 0.916291, epsilon));
expect(math.approxEq(f64, log1p_64(37.45), 3.649359, epsilon));
expect(math.approxEq(f64, log1p_64(89.123), 4.501175, epsilon));
expect(math.approxEq(f64, log1p_64(123123.234375), 11.720949, epsilon));
}
test "math.log1p_32.special" {
expect(math.isPositiveInf(log1p_32(math.inf(f32))));
expect(log1p_32(0.0) == 0.0);
expect(log1p_32(-0.0) == -0.0);
expect(math.isNegativeInf(log1p_32(-1.0)));
expect(math.isNan(log1p_32(-2.0)));
expect(math.isNan(log1p_32(math.nan(f32))));
}
test "math.log1p_64.special" {
expect(math.isPositiveInf(log1p_64(math.inf(f64))));
expect(log1p_64(0.0) == 0.0);
expect(log1p_64(-0.0) == -0.0);
expect(math.isNegativeInf(log1p_64(-1.0)));
expect(math.isNan(log1p_64(-2.0)));
expect(math.isNan(log1p_64(math.nan(f64))));
} | lib/std/math/log1p.zig |
const std = @import("std");
const table_bits: usize = 14; // Bits used in the table.
const table_size: usize = 1 << table_bits; // Size of the table.
const table_mask: usize = table_size - 1; // Mask for table indices. Redundant, but can eliminate bounds checks.
const table_shift: usize = 32 - table_bits; // Right-shift to get the tableBits most significant bits of a uint32.
const input_margin = 16 - 1;
const min_non_literal_block_size = 1 + 1 + input_margin;
const max_store_block_size: usize = 65535;
// The LZ77 step produces a sequence of literal tokens and <length, offset>
// pair tokens. The offset is also known as distance. The underlying wire
// format limits the range of lengths and offsets. For example, there are
// 256 legitimate lengths: those in the range [3, 258]. This package's
// compressor uses a higher minimum match length, enabling optimizations
// such as finding matches via 32-bit loads and compares.
const base_match_length: usize = 3; // The smallest match length per the RFC section 3.2.5
const max_match_offset: usize = 1 << 15;
const max_match_length: usize = 258; // The largest match length
const base_match_offset: usize = 1; // The smallest match offset
const min_match_length: usize = 4; // The smallest match length that the compressor actually emits
const literal_type: u32 = 0 << 30;
const match_type: u32 = 1 << 30;
const length_shift = 22; // length sits above the 22 offset bits, as in Go's compress/flate
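// Illustrative decoding helpers (an assumption mirroring Go's compress/flate
// token layout: 2 type bits, then the length above length_shift, and the
// offset in the low bits). They are unused by the encoder below and are
// included only as a sketch of how a match token unpacks:
fn tokenLength(t: u32) u32 {
    // valid only for match tokens, where t >= match_type
    return (t - match_type) >> length_shift;
}
fn tokenOffset(t: u32) u32 {
    return (t - match_type) & ((1 << length_shift) - 1);
}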
pub const Fast = struct {
table: [table_size]TableEntry = [_]TableEntry{TableEntry{}} ** table_size,
prev: []u8 = blk: {
var a: [max_store_block_size]u8 = undefined;
break :blk a[0..];
},
prev_len: usize = 0,
cur: usize = max_store_block_size,
fn init() Fast {
return Fast{};
}
const TableEntry = struct {
val: u32 = 0,
offset: usize = 0,
};
fn load32(b: []const u8) u32 {
        return @intCast(u32, b[0]) | @intCast(u32, b[1]) << 8 |
@intCast(u32, b[2]) << 16 | @intCast(u32, b[3]) << 24;
}
fn load64(b: []const u8) u64 {
        return @intCast(u64, b[0]) | @intCast(u64, b[1]) << 8 |
@intCast(u64, b[2]) << 16 | @intCast(u64, b[3]) << 24 |
@intCast(u64, b[4]) << 32 | @intCast(u64, b[5]) << 40 |
@intCast(u64, b[6]) << 48 | @intCast(u64, b[7]) << 56;
}
fn hash(u: u32) u32 {
        return (u *% 0x1e35a7bd) >> @intCast(u5, table_shift); // wrapping multiply, matching Go's uint32 overflow semantics
}
fn literalToken(lit: u32) u32 {
return literal_type + lit;
}
fn matchToken(xlength: u32, xoffset: u32) u32 {
        return match_type + (xlength << length_shift) + xoffset;
}
fn emitLiteral(dst: *std.ArrayList(u32), src: []const u8) !void {
for (src) |lit| {
try dst.append(literalToken(@intCast(u32, lit)));
}
}
pub fn encode(
self: *Fast,
dst: *std.ArrayList(u32),
src: []const u8,
) !void {
if (self.cur > (1 << 30)) {
self.resetAll();
}
if (src.len < min_non_literal_block_size) {
self.cur += max_store_block_size;
self.zeroPrev();
return emitLiteral(dst, src);
}
        const s_limit = src.len - input_margin;
var next_emit: usize = 0;
var s: usize = 0;
var cv = load32(src);
var next_hash = hash(cv);
while (true) {
// Copied from the C++ snappy implementation:
//
// Heuristic match skipping: If 32 bytes are scanned with no matches
// found, start looking only at every other byte. If 32 more bytes are
// scanned (or skipped), look at every third byte, etc.. When a match
// is found, immediately go back to looking at every byte. This is a
// small loss (~5% performance, ~0.1% density) for compressible data
// due to more bookkeeping, but for non-compressible data (such as
// JPEG) it's a huge win since the compressor quickly "realizes" the
// data is incompressible and doesn't bother looking for matches
// everywhere.
//
// The "skip" variable keeps track of how many bytes there are since
// the last match; dividing it by 32 (ie. right-shifting by five) gives
// the number of bytes to move ahead for each iteration.
var skip: usize = 32;
var next_s = s;
var candidate = TableEntry{};
while (true) {
s = next_s;
const bytes_between_hash_lookup = skip >> 5;
next_s = s + bytes_between_hash_lookup;
skip += bytes_between_hash_lookup;
if (next_s > s_limit) {
if (next_emit < src.len) {
try emitLiteral(dst, src[next_emit..]);
}
self.cur += src.len;
self.prev_len = src.len;
std.mem.copy(u8, self.prev, src);
return;
}
const x = @intCast(usize, next_hash) & table_mask;
candidate = self.table[x];
                var now = load32(src[next_s..]); // the 4 bytes at next_s, as in the Go fast encoder
self.table[x] = TableEntry{
.offset = s + self.cur,
.val = cv,
};
next_hash = hash(now);
                // wrapping arithmetic: a stale entry (candidate.offset < self.cur)
                // must yield a huge offset that fails the range check below
                const offset = s -% (candidate.offset -% self.cur);
if (offset > max_match_offset or cv != candidate.val) {
cv = now;
continue;
}
break;
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
try emitLiteral(dst, src[next_emit..s]);
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
while (true) {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
s += 4;
                const t = @intCast(isize, candidate.offset) - @intCast(isize, self.cur) + 4;
const l = self.matchLen(s, t, src);
try dst.append(matchToken(
                    @intCast(u32, l + 4 - base_match_length), // the token stores length minus base_match_length
@intCast(u32, @intCast(isize, s) - t - @intCast(isize, base_match_offset)),
));
s += l;
next_emit = s;
if (s >= s_limit) {
if (next_emit < src.len) {
try emitLiteral(dst, src[next_emit..]);
}
self.cur += src.len;
self.prev_len = src.len;
std.mem.copy(u8, self.prev, src);
return;
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
                // load64 at s-1 so both hash updates come from a single load;
                // @truncate keeps Go's uint32() truncation semantics.
                var x = load64(src[s - 1 ..]);
                const prev_hash = hash(@truncate(u32, x));
                self.table[prev_hash & table_mask] = TableEntry{
                    .offset = self.cur + s - 1,
                    .val = @truncate(u32, x),
                };
                x >>= 8;
                const curr_hash = hash(@truncate(u32, x));
                candidate = self.table[curr_hash & table_mask];
                self.table[curr_hash & table_mask] = TableEntry{
                    .offset = self.cur + s,
                    .val = @truncate(u32, x),
                };
                const offset = s -% (candidate.offset -% self.cur);
                if (offset > max_match_offset or @truncate(u32, x) != candidate.val) {
                    cv = @truncate(u32, x >> 8);
                    next_hash = hash(cv);
                    s += 1;
                    break;
                }
}
}
}
fn matchLen(self: *Fast, s: usize, t: isize, src: []const u8) usize {
var s1 = s + max_match_length - 4;
if (s1 > src.len) {
s1 = src.len;
}
if (t >= 0) {
const tt = @intCast(usize, t);
var b = src[tt..];
const a = src[s..s1];
b = b[0..a.len];
var i: usize = 0;
while (i < a.len) : (i += 1) {
if (a[i] != b[i]) {
return i;
}
}
return a.len;
}
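        // t < 0: the match begins in the previous block. Compare against
        // self.prev first; if it matches to the end of prev, continue matching
        // from the start of the current src (the two blocks are contiguous).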
const tp = @intCast(isize, self.prev_len) + t;
if (tp < 0) {
return 0;
}
var a = src[s..s1];
var b = self.prev[@intCast(usize, tp)..];
if (b.len > a.len) {
b = b[0..a.len];
}
a = a[0..b.len];
var i: usize = 0;
while (i < b.len) : (i += 1) {
if (a[i] != b[i]) {
return i;
}
}
const n = b.len;
if (s + n == s1) {
return n;
}
a = src[s + n .. s1];
b = src[0..a.len];
i = 0;
while (i < a.len) : (i += 1) {
if (a[i] != b[i]) {
return i + n;
}
}
return a.len + n;
}
fn reset(self: *Fast) void {
self.zeroPrev();
self.cur += max_match_offset;
if (self.cur > (1 << 30)) {
self.resetAll();
}
}
fn zeroPrev(self: *Fast) void {
var i: usize = 0;
while (i < self.prev.len) : (i += 1) {
self.prev[i] = 0;
}
self.prev_len = 0;
}
fn resetAll(self: *Fast) void {
self.cur = max_store_block_size;
self.zeroPrev();
var i: usize = 0;
while (i < self.table.len) : (i += 1) {
self.table[i] = TableEntry{};
}
}
}; | src/fast.zig |
const std = @import("std");
const copy = std.mem.copy;
const Random = std.rand.Random;
const W = 8;
pub fn PrivateKey(comptime Hash: type) type {
const n = Hash.digest_length;
return struct {
const Self = @This();
pub const digest_length = n;
forward_hash_key: [n][n]u8 = undefined,
reverse_hash_key: [n][n]u8 = undefined,
pub fn init(csprng: *Random) Self {
var self = Self{};
for (self.forward_hash_key) |_, i| {
csprng.bytes(self.forward_hash_key[i][0..]);
}
for (self.reverse_hash_key) |_, i| {
csprng.bytes(self.reverse_hash_key[i][0..]);
}
return self;
}
};
}
pub fn PublicKey(comptime Hash: type) type {
const n = Hash.digest_length;
return struct {
const Self = @This();
pub const digest_length = n;
forward_hash_key: [n][n]u8,
reverse_hash_key: [n][n]u8,
pub fn fromPrivateKey(pk: *const PrivateKey(Hash)) Self {
var self = Self{
.forward_hash_key = pk.forward_hash_key,
.reverse_hash_key = pk.reverse_hash_key,
};
const iterations = [_]u8{(1 << W)-1} ** n; // max iterations
multi_hash(Hash, iterations, &self.forward_hash_key, false, 1);
multi_hash(Hash, iterations, &self.reverse_hash_key, false, 1);
return self;
}
pub fn fromSignature(sig: *const Signature(Hash)) Self {
var self = Self{
.forward_hash_key = sig.forward_hash_key,
.reverse_hash_key = sig.reverse_hash_key,
};
            multi_hash(Hash, sig.message_digest, &self.forward_hash_key, true, 1);
            multi_hash(Hash, sig.message_digest, &self.reverse_hash_key, false, 1);
return self;
}
pub fn compress(self: *const Self, digest: *[n]u8) void {
var d = Hash.init(.{});
for (self.forward_hash_key) |key| {
d.update(key[0..]);
}
for (self.reverse_hash_key) |key| {
d.update(key[0..]);
}
d.final(digest);
}
};
}
pub fn Signature(comptime Hash: type) type {
const n = Hash.digest_length;
return struct {
const Self = @This();
pub const digest_length = n;
        message_digest: [n]u8 = undefined,
forward_hash_key: [n][n]u8,
reverse_hash_key: [n][n]u8,
pub fn fromPrivateKey(pk: *const PrivateKey(Hash), msg: []const u8) Self {
var self = Self{
.forward_hash_key = pk.forward_hash_key,
.reverse_hash_key = pk.reverse_hash_key,
};
            Hash.hash(msg, self.message_digest[0..], .{});
            multi_hash(Hash, self.message_digest, &self.forward_hash_key, false, 0);
            multi_hash(Hash, self.message_digest, &self.reverse_hash_key, true, 0);
return self;
}
};
}
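// A hedged verification sketch (not part of the original file): recompute the
// public key from a signature and compare compressed digests. A real verifier
// must also recompute the digest from the message itself rather than trust
// sig.message_digest; `sig` and `known_public_key` are hypothetical names:
//
//     const recovered = PublicKey(Hash).fromSignature(&sig);
//     var a: [Hash.digest_length]u8 = undefined;
//     var b: [Hash.digest_length]u8 = undefined;
//     recovered.compress(&a);
//     known_public_key.compress(&b);
//     const valid = std.mem.eql(u8, a[0..], b[0..]);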
pub fn DRNG(comptime Aead: type, comptime output_length: usize) type {
const seed_length = Aead.key_length + output_length;
return struct {
        pub const key_length = output_length;
const Self = @This();
secret1: [Aead.key_length]u8 = undefined,
secret2: [output_length]u8 = undefined,
nonce: [Aead.nonce_length]u8,
pub fn init(seed: [seed_length]u8, nonce: [Aead.nonce_length]u8) Self {
var self = Self{
.nonce = nonce,
};
copy(u8, self.secret1[0..], seed[0..Aead.key_length]);
copy(u8, self.secret2[0..], seed[Aead.key_length..]);
return self;
}
pub fn next(self: *Self) !void {
var overflow = true;
            // constant-time (unconfirmed) increment, for side-channel protection
for (self.nonce) |byte,i| {
const carry: u8 = if (overflow) 1 else 0;
overflow = @addWithOverflow(u8, byte, carry, &self.nonce[i]);
}
if (overflow) {
return error.Overflow;
}
}
pub fn generate(self: *const Self, key: *[output_length]u8) void {
const nothing = [_]u8{};
var tag: [Aead.tag_length]u8 = undefined;
Aead.encrypt(key, tag[0..], self.secret2[0..], nothing[0..], self.nonce, self.secret1);
}
};
}
fn multi_hash(
comptime Hash: type,
iterations: [Hash.digest_length]u8,
digest: *[Hash.digest_length][Hash.digest_length]u8,
flipbits: bool,
extra_iterations: u8,
) void {
for (iterations) |n, i| {
        // widen to usize before adding: ~n can be 255, so the u8 sum would overflow
        const m: usize = @as(usize, if (flipbits) ~n else n) + extra_iterations;
var k: usize = 0;
while (k < m) : (k += 1) {
Hash.hash(digest[i][0..], digest[i][0..], .{});
}
}
} | src/main.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const List = std.ArrayList;
const Map = std.AutoHashMap;
const StrMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const Str = []const u8;
const util = @import("util.zig");
const gpa = util.gpa;
// const data = @embedFile("../data/day08-tst.txt");
const data = @embedFile("../data/day08.txt");
pub fn main() !void {
part1();
try part2();
}
fn part1() void {
var lines = tokenize(u8, data, "\r\n");
var cpt: u32 = 0;
while (lines.next()) |line| {
var it = tokenize(u8, line, " |");
var i: u32 = 0;
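        // consume the 10 signal patterns to the left of the '|' separator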
while (i < 10) : ({
i += 1;
_ = it.next();
}) {}
while (it.next()) |digit| {
var len = digit.len;
if ((len >= 2 and len <= 4) or (len == 7)) {
cpt += 1;
}
}
}
print("{}\n", .{cpt});
}
fn part2() !void {
// 0 -> 'a', ..., 7 -> 'g'
var lines = tokenize(u8, data, "\r\n");
var sum: usize = 0;
while (lines.next()) |line| {
sum += try handleLine(line);
}
print("{}\n", .{sum});
}
fn cmpStr(_: void, a: Str, b: Str) bool {
if (a.len < b.len) return true;
if (a.len > b.len) return false;
var i: usize = 0;
while (i < a.len) : (i += 1) {
if (a[i] < b[i]) return true;
if (a[i] > b[i]) return false;
}
return false;
}
fn cmpValue(context: void, a: u8, b: u8) bool {
return std.sort.asc(u8)(context, a, b);
}
fn handleLine(line: Str) !usize {
var line_split = tokenize(u8, line, "|");
var left = line_split.next().?;
var right = line_split.next().?;
var chars = [_]u8{0} ** 7;
    var inputs = try List(Str).initCapacity(gpa, 10);
    defer inputs.deinit();
var it = tokenize(u8, left, " ");
while (it.next()) |str| {
try inputs.append(str);
}
std.sort.sort(Str, inputs.items, {}, cmpStr);
try decode(inputs.items, chars[0..]);
var numbers = [10][7]u8{
[_]u8{ chars[0], chars[1], chars[2], chars[4], chars[5], chars[6], 0 },
[_]u8{ chars[2], chars[5], 0, 0, 0, 0, 0 },
[_]u8{ chars[0], chars[2], chars[3], chars[4], chars[6], 0, 0 },
[_]u8{ chars[0], chars[2], chars[3], chars[5], chars[6], 0, 0 },
[_]u8{ chars[1], chars[2], chars[3], chars[5], 0, 0, 0 },
[_]u8{ chars[0], chars[1], chars[3], chars[5], chars[6], 0, 0 },
[_]u8{ chars[0], chars[1], chars[3], chars[4], chars[5], chars[6], 0 },
[_]u8{ chars[0], chars[2], chars[5], 0, 0, 0, 0 },
[_]u8{ chars[0], chars[1], chars[2], chars[3], chars[4], chars[5], chars[6] },
[_]u8{ chars[0], chars[1], chars[2], chars[3], chars[5], chars[6], 0 },
};
var buffer = [_]u8{0} ** 7;
it = tokenize(u8, right, " ");
var sum: usize = 0;
var mult: usize = 1000;
while (it.next()) |str| {
for (numbers) |num, i| {
var inter = try intersection(buffer[0..], num[0..], str);
if (inter.len == str.len and inter.len == strLen(num[0..])) {
sum += mult * i;
mult /= 10;
break;
}
}
}
return sum;
}
// inputs[0] -> 1
// inputs[1] -> 7
// inputs[2] -> 4
// inputs[3-5] -> 2, 3, 5
// inputs[6-8] -> 0, 6, 9
// inputs[9] -> 8
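// (cmpStr sorts by length first, so the lengths are 2,3,4,5,5,5,6,6,6,7 and
// the fixed indices above are valid)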
fn decode(input: []Str, chars: []u8) !void {
var buffer = [_]u8{0} ** 7;
var i_3: usize = 0;
var i_5: usize = 0;
var s_1 = input[0];
var s_4 = input[2];
var s_7 = input[1];
var s_2: Str = undefined;
var s_3: Str = undefined;
var s_5: Str = undefined;
// Find '3'. This is the string that shares 2 chars with '1'
var i: usize = 3;
while (i <= 5) : (i += 1) {
var inter = try intersection(buffer[0..], s_1, input[i]);
if (inter.len == 2) {
s_3 = input[i];
i_3 = i;
break;
}
}
// 'a' is the difference between '7' and '1'
var diff_7_1 = try difference(buffer[0..], s_7, s_1);
assert(diff_7_1.len == 1);
chars[0] = diff_7_1[0]; // a
// We compute the difference between '4' and '1' to find '2' and '5'
var diff_4_1 = [2]u8{ 0, 0 };
_ = try difference(diff_4_1[0..], s_4, s_1);
// '5' in the string that shares 2 chars with '4-1'
i = 3;
while (i <= 5) : (i += 1) {
var inter = try intersection(buffer[0..], diff_4_1[0..], input[i]);
if (inter.len == 2) {
s_5 = input[i];
i_5 = i;
break;
}
}
// 'b' is the difference between ('4'-'1' and '3')
var diff_41_3 = try difference(buffer[0..], diff_4_1[0..], s_3);
assert(diff_41_3.len == 1);
chars[1] = diff_41_3[0]; // b
// '2' is neither '3' nor '5'
assert(i_3 != 0);
assert(i_5 != 0);
i = 3;
while (i <= 5) : (i += 1) {
if (i_3 != i and i_5 != i) {
s_2 = input[i];
break;
}
}
// 'c' is the intersection between '1' and '2'
var inter_1_2 = try intersection(buffer[0..], s_1, s_2);
assert(inter_1_2.len == 1);
chars[2] = inter_1_2[0];
// 'd' is the intersection between '3' and '4'-'1'
var inter_3_41 = try intersection(buffer[0..], s_3, diff_4_1[0..]);
assert(inter_3_41.len == 1);
chars[3] = inter_3_41[0];
// 'e' is the difference between '2' and '3'
var diff_2_3 = try difference(buffer[0..], s_2, s_3);
assert(diff_2_3.len == 1);
chars[4] = diff_2_3[0];
// 'f' is the intersection between '5' and '1'
var inter_5_1 = try intersection(buffer[0..], s_1, s_5);
assert(inter_5_1.len == 1);
chars[5] = inter_5_1[0];
// 'g' is the character we never inserted
var all_chars = [_]u8{ 'a', 'b', 'c', 'd', 'e', 'f', 'g' };
var remaining = try difference(buffer[0..], all_chars[0..], chars);
assert(remaining.len == 1);
chars[6] = remaining[0];
}
fn intersection(buffer: []u8, a: Str, b: Str) !Str {
var result = buffer;
std.mem.set(u8, result, 0);
var i: usize = 0;
for (a) |c0| {
for (b) |c1| {
if (c0 == c1) {
result[i] = c0;
i += 1;
}
}
}
result.len = i;
return result;
}
/// Elements of a that are not in b
fn difference(buffer: []u8, a: Str, b: Str) !Str {
var result = buffer;
std.mem.set(u8, result, 0);
var i: usize = 0;
for (a) |c0| {
var found = false;
for (b) |c1| {
if (c0 == c1) found = true;
}
if (!found) {
result[i] = c0;
i += 1;
}
}
result.len = i;
return result;
}
fn strLen(str: Str) usize {
var len: usize = 0;
for (str) |c| {
if (c == 0) {
return len;
}
len += 1;
}
return len;
}
// Useful stdlib functions
const tokenize = std.mem.tokenize;
const print = std.debug.print;
const assert = std.debug.assert; | src/day08.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const List = std.ArrayList;
const Map = std.AutoHashMap;
const StrMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const Str = []const u8;
var gpa_impl = std.heap.GeneralPurposeAllocator(.{}){};
pub const gpa = gpa_impl.allocator();
pub fn parseIntArray(in: []const u8) !std.ArrayList(i32) {
var it = tokenize(u8, in, "\r\n");
var list = List(i32).init(gpa);
    while (it.next()) |next| {
        const val = try parseInt(i32, next, 10);
        try list.append(val);
    }
return list;
}
pub fn getBin(str: Str) !BitSet {
var bin = try BitSet.initEmpty(str.len, gpa);
for (str) |c, i| {
if (c == '1') {
bin.set(i);
}
}
return bin;
}
pub fn parseBinaries(in_data: []const u8) !List(BitSet) {
var it = tokenize(u8, in_data, "\n\r");
var bins = List(BitSet).init(gpa);
// Parse bins
    while (it.next()) |next| {
        const bin = try getBin(next);
        try bins.append(bin);
    }
return bins;
}
pub fn binToDecimal(bin: *BitSet) usize {
var value: usize = 0;
var mult: usize = 1;
var i: usize = bin.unmanaged.bit_length - 1;
while (true) : (i -= 1) {
if (bin.isSet(i)) {
value += mult;
}
mult *= 2;
if (i == 0) {
break;
}
}
return value;
}
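// A minimal usage sketch (assuming this snapshot's DynamicBitSet API); bit 0
// of getBin's result is the most-significant digit of the input string, so
// "1011" reads as decimal 11:
test "binToDecimal sketch" {
    var bin = try getBin("1011");
    defer bin.deinit();
    try std.testing.expectEqual(@as(usize, 11), binToDecimal(&bin));
}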
/// out must be at least 4 elements wide
pub fn getNeighborIndices(i: usize, w: usize, h: usize, buffer: []usize) []usize {
var x = @mod(i, w);
var y = @divFloor(i, w);
var out = buffer;
var it: usize = 0;
if (x > 0) {
out[it] = (x - 1) + y * w;
it += 1;
}
if (x < w - 1) {
out[it] = (x + 1) + y * w;
it += 1;
}
if (y > 0) {
out[it] = x + (y - 1) * w;
it += 1;
}
if (y < h - 1) {
out[it] = x + (y + 1) * w;
it += 1;
}
out.len = it;
return out;
}
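// A small sketch of the neighbor helper on a 3x3 grid: the center cell
// (index 4) has all four cardinal neighbors.
test "getNeighborIndices sketch" {
    var buffer: [4]usize = undefined;
    const out = getNeighborIndices(4, 3, 3, buffer[0..]);
    try std.testing.expectEqual(@as(usize, 4), out.len);
}
/// Returns `len` zero-bit elements; the undefined pointer is never
/// dereferenced, so `for (range(n)) |_, i|` is a safe counted loop.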
pub fn range(len: usize) []const u0 {
return @as([*]u0, undefined)[0..len];
}
// Useful stdlib functions
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;
const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;
const min = std.math.min;
const min3 = std.math.min3;
const max = std.math.max;
const max3 = std.math.max3;
const print = std.debug.print;
const assert = std.debug.assert;
const sort = std.sort.sort;
const asc = std.sort.asc;
const desc = std.sort.desc; | src/util.zig |
pub const deployment_environment = .development;
/// The maximum log level in increasing order of verbosity (emergency=0, debug=7):
pub const log_level = 6;
/// The maximum number of replicas allowed in a cluster.
pub const replicas_max = 15;
/// The minimum number of nodes required to form quorums for leader election or replication:
/// Majority quorums are only required across leader election and replication phases (not within).
/// As per Flexible Paxos, provided quorum_leader_election + quorum_replication > cluster_nodes:
/// 1. you may increase quorum_leader_election above a majority, so that
/// 2. you can decrease quorum_replication below a majority, to optimize the common case.
/// This improves latency by reducing the number of nodes required for synchronous replication.
/// This reduces redundancy only in the short term, asynchronous replication will still continue.
pub const quorum_leader_election = -1;
pub const quorum_replication = 2;
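// An illustrative (hypothetical) compile-time guard for the Flexible Paxos
// condition above; `cluster_nodes` is not a constant defined in this file:
//
//     comptime {
//         std.debug.assert(quorum_leader_election + quorum_replication > cluster_nodes);
//     }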
/// The default server port to listen on if not specified in `--replica-addresses`:
pub const port = 3001;
/// The default network interface address to listen on if not specified in `--replica-addresses`:
/// WARNING: Binding to all interfaces with "0.0.0.0" is dangerous and opens the server to anyone.
/// Bind to the "127.0.0.1" loopback address to accept local connections as a safe default only.
pub const address = "127.0.0.1";
/// Where journal files should be persisted:
pub const data_directory = "/var/lib/tigerbeetle";
/// The maximum number of accounts to store in memory:
/// This impacts the amount of memory allocated at initialization by the server.
pub const accounts_max = switch (deployment_environment) {
.production => 1_000_000,
else => 100_000,
};
/// The maximum number of transfers to store in memory:
/// This impacts the amount of memory allocated at initialization by the server.
/// We allocate more capacity than the number of transfers for a safe hash table load factor.
pub const transfers_max = switch (deployment_environment) {
.production => 100_000_000,
else => 1_000_000,
};
/// The maximum number of two-phase commits to store in memory:
/// This impacts the amount of memory allocated at initialization by the server.
pub const commits_max = transfers_max;
/// The maximum size of the journal file:
/// This is pre-allocated and zeroed for performance when initialized.
/// Writes within this file never extend the filesystem inode size reducing the cost of fdatasync().
/// This enables static allocation of disk space so that appends cannot fail with ENOSPC.
/// This also enables us to detect filesystem inode corruption that would change the journal size.
pub const journal_size_max = switch (deployment_environment) {
.production => 128 * 1024 * 1024 * 1024,
else => 256 * 1024 * 1024,
};
/// The maximum number of batch entries in the journal file:
/// A batch entry may contain many transfers, so this is not a limit on the number of transfers.
/// We need this limit to allocate space for copies of batch headers at the start of the journal.
/// These header copies enable us to disentangle corruption from crashes and recover accordingly.
pub const journal_headers_max = switch (deployment_environment) {
.production => 1024 * 1024,
else => 16384,
};
/// The maximum number of connections that can be accepted and held open by the server at any time:
pub const connections_max = 32;
/// The maximum size of a message in bytes:
/// This is also the limit of all inflight data across multiple pipelined requests per connection.
/// We may have one request of up to 4 MiB inflight or 4 pipelined requests of up to 1 MiB inflight.
/// This impacts sequential disk write throughput, the larger the buffer the better.
/// 4 MiB is 32,768 transfers, and a reasonable choice for sequential disk write throughput.
/// However, this impacts bufferbloat and head-of-line blocking latency for pipelined requests.
/// For a 1 Gbps NIC = 125 MiB/s throughput: 4 MiB / 125 * 1000ms = 32ms for the next request.
/// This also impacts the amount of memory allocated at initialization by the server.
pub const message_size_max = 4 * 1024 * 1024;
/// The number of full-sized messages allocated at initialization by the message bus.
pub const message_bus_messages_max = connections_max * 4;
/// The number of header-sized messages allocated at initialization by the message bus.
/// These are much smaller/cheaper and we can therefore have many of them.
pub const message_bus_headers_max = connections_max * connection_send_queue_max;
/// The minimum and maximum amount of time in milliseconds to wait before initiating a connection.
/// Exponential backoff and full jitter are applied within this range.
/// For more, see: https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
pub const connection_delay_min = 50;
pub const connection_delay_max = 1000;
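// A minimal full-jitter sketch of the backoff described above (an assumption
// for illustration, not necessarily the message bus's implementation):
//
//     fn connectionDelayMs(prng: anytype, attempt: u6) u64 {
//         const cap = std.math.min(@as(u64, connection_delay_max), @as(u64, connection_delay_min) << attempt);
//         return connection_delay_min + prng.uintLessThan(u64, cap - connection_delay_min + 1);
//     }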
/// The maximum number of outgoing messages that may be queued on a connection.
pub const connection_send_queue_max = 3;
/// The maximum number of connections in the kernel's complete connection queue pending an accept():
/// If the backlog argument is greater than the value in `/proc/sys/net/core/somaxconn`, then it is
/// silently truncated to that value. Since Linux 5.4, the default in this file is 4096.
pub const tcp_backlog = 64;
/// The maximum size of a kernel socket receive buffer in bytes (or 0 to use the system default):
/// This sets SO_RCVBUF as an alternative to the auto-tuning range in /proc/sys/net/ipv4/tcp_rmem.
/// The value is limited by /proc/sys/net/core/rmem_max, unless the CAP_NET_ADMIN privilege exists.
/// The kernel doubles this value to allow space for packet bookkeeping overhead.
/// The receive buffer should ideally exceed the Bandwidth-Delay Product for maximum throughput.
/// At the same time, be careful going beyond 4 MiB as the kernel may merge many small TCP packets,
/// causing considerable latency spikes for large buffer sizes:
/// https://blog.cloudflare.com/the-story-of-one-latency-spike/
pub const tcp_rcvbuf = 4 * 1024 * 1024;
/// The maximum size of a kernel socket send buffer in bytes (or 0 to use the system default):
/// This sets SO_SNDBUF as an alternative to the auto-tuning range in /proc/sys/net/ipv4/tcp_wmem.
/// The value is limited by /proc/sys/net/core/wmem_max, unless the CAP_NET_ADMIN privilege exists.
/// The kernel doubles this value to allow space for packet bookkeeping overhead.
pub const tcp_sndbuf = 4 * 1024 * 1024;
/// Whether to enable TCP keepalive:
pub const tcp_keepalive = true;
/// The time (in seconds) the connection needs to be idle before sending TCP keepalive probes:
/// Probes are not sent when the send buffer has data or the congestion window size is zero,
/// for these cases we also need tcp_user_timeout below.
pub const tcp_keepidle = 5;
/// The time (in seconds) between individual keepalive probes:
pub const tcp_keepintvl = 4;
/// The maximum number of keepalive probes to send before dropping the connection:
pub const tcp_keepcnt = 3;
/// The time (in milliseconds) to timeout an idle connection or unacknowledged send:
/// This timer rides on the granularity of the keepalive or retransmission timers.
/// For example, if keepalive will only send a probe after 10s then this becomes the lower bound
/// for tcp_user_timeout to fire, even if tcp_user_timeout is 2s. Nevertheless, this would timeout
/// the connection at 10s rather than wait for tcp_keepcnt probes to be sent. At the same time, if
/// tcp_user_timeout is larger than the max keepalive time then tcp_keepcnt will be ignored and
/// more keepalive probes will be sent until tcp_user_timeout fires.
/// For a thorough overview of how these settings interact:
/// https://blog.cloudflare.com/when-tcp-sockets-refuse-to-die/
pub const tcp_user_timeout = (tcp_keepidle + tcp_keepintvl * tcp_keepcnt) * 1000;
/// Whether to disable Nagle's algorithm to eliminate send buffering delays:
pub const tcp_nodelay = true;
/// The minimum size of an aligned kernel page and an Advanced Format disk sector:
/// This is necessary for direct I/O without the kernel having to fix unaligned pages with a copy.
/// The new Advanced Format sector size is backwards compatible with the old 512 byte sector size.
/// This should therefore never be less than 4 KiB to be future-proof when server disks are swapped.
pub const sector_size = 4096;
/// Whether to perform direct I/O to the underlying disk device:
/// This enables several performance optimizations:
/// * A memory copy to the kernel's page cache can be eliminated for reduced CPU utilization.
/// * I/O can be issued immediately to the disk device without buffering delay for improved latency.
/// This also enables several safety features:
/// * Disk data can be scrubbed to repair latent sector errors and checksum errors proactively.
/// * Fsync failures can be recovered from correctly.
/// WARNING: Disabling direct I/O is unsafe; the page cache cannot be trusted after an fsync error,
/// even after an application panic, since the kernel will mark dirty pages as clean, even
/// when they were never written to disk.
pub const direct_io = true;
/// The number of milliseconds between each replica tick, the basic unit of time in TigerBeetle.
/// Used to regulate heartbeats, retries and timeouts, all specified as multiples of a tick.
pub const tick_ms = 10;
/// The maximum skew between two clocks to allow when considering them to be in agreement.
/// The principle is that no two clocks tick exactly alike but some clocks more or less agree.
/// The maximum skew across the cluster as a whole is this value times the total number of clocks.
/// The cluster will be unavailable if the majority of clocks are all further than this value apart.
/// Decreasing this reduces the probability of reaching agreement on synchronized time.
/// Increasing this reduces the accuracy of synchronized time.
pub const clock_offset_tolerance_max_ms = 10000;
/// The amount of time before the clock's synchronized epoch is expired.
/// If the epoch is expired before it can be replaced with a new synchronized epoch, then this most
/// likely indicates either a network partition or else too many clock faults across the cluster.
/// A new synchronized epoch will be installed as soon as these conditions resolve.
pub const clock_epoch_max_ms = 60000;
/// The amount of time to wait for enough accurate samples before synchronizing the clock.
/// The more samples we can take per remote clock source, the more accurate our estimation becomes.
/// This impacts cluster startup time as the leader must first wait for synchronization to complete.
pub const clock_synchronization_window_min_ms = 2000;
/// The amount of time without agreement before the clock window is expired and a new window opened.
/// This happens where some samples have been collected but not enough to reach agreement.
/// The quality of samples degrades as they age so at some point we throw them away and start over.
/// This eliminates the impact of gradual clock drift on our clock offset (clock skew) measurements.
/// If a window expires because of this then it is likely that the clock epoch will also be expired.
pub const clock_synchronization_window_max_ms = 20000; | src/config.zig |
const std = @import("std");
const log = std.log;
const builtin = @import("builtin");
const Parser = @import("parser.zig").Parser;
const HTMLGenerator = @import("html.zig").HTMLGenerator;
const CodeRunner = @import("code_chunks.zig").CodeRunner;
const cite = @import("cite.zig");
const run_citeproc = cite.run_citeproc;
const csl = @import("csl_json.zig");
const ast = @import("ast.zig");
const clap = @import("zig-clap");
pub fn main() !void {
// gpa optimized for safety over performance; can detect leaks, double-free and use-after-free
// takes a config struct (empty here .{})
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer {
        // deinit reports whether any allocations leaked
        const leaked = gpa.deinit();
        if (leaked) std.debug.print("Leak detected: {}\n", .{leaked});
    }
const allocator = &gpa.allocator;
// const allocator = std.heap.page_allocator;
// We can use `parseParam` to parse a string to a `Param(Help)`
@setEvalBranchQuota(2000);
const params = comptime [_]clap.Param(clap.Help){
clap.parseParam("-h, --help Display this help and exit.") catch unreachable,
clap.parseParam("-o, --out <FILENAME> Output filename.") catch unreachable,
clap.parseParam("-r, --references <FILENAME> Path to references file (BibLaTeX or CSL-JSON).") catch unreachable,
clap.parseParam("-s, --citation-style <FILENAME> Path to CSL file.") catch unreachable,
clap.parseParam("-l, --locale <LOCALE> Specify locale as BCP 47 language tag.") catch unreachable,
clap.parseParam("--write-bib-conversion Whether to write out the converted .bib file as CSL-JSON") catch unreachable,
// clap.parseParam(
// "-s, --string <STR>... An option parameter which can be specified multiple times.") catch unreachable,
clap.parseParam("<IN-FILE>") catch unreachable,
};
    // Initialize our diagnostics, which can be used for reporting useful errors.
// This is optional. You can also pass `.{}` to `clap.parse` if you don't
// care about the extra information `Diagnostics` provides.
var diag = clap.Diagnostic{};
// TODO use parseEx since using clap.parse directly is bugged even though
// it's just a thin wrapper https://github.com/Hejsil/zig-clap/issues/43
// somehow the cause of the issue is parseEx reusing OsIterator's allocator
// var args = clap.parse(clap.Help, ¶ms, .{ .diagnostic = &diag, .allocator = allocator }) catch |err| {
// // Report useful error and exit
// diag.report(std.io.getStdErr().writer(), err) catch {};
// return err;
// };
// defer args.deinit();
// We then initialize an argument iterator. We will use the OsIterator as it nicely
// wraps iterating over arguments the most efficient way on each os.
var iter = try clap.args.OsIterator.init(allocator);
defer iter.deinit();
var args = clap.parseEx(clap.Help, ¶ms, &iter, .{
.allocator = allocator,
.diagnostic = &diag,
}) catch |err| {
// Report useful error and exit
diag.report(std.io.getStdErr().writer(), err) catch {};
return err;
};
defer args.deinit();
if (args.flag("--help") or args.positionals().len != 1) {
const writer = std.io.getStdErr().writer();
if (builtin.os.tag == .windows) {
try writer.writeAll("Usage: scimd.exe ");
} else {
try writer.writeAll("Usage: scimd ");
}
try clap.usage(writer, ¶ms);
try writer.writeByte('\n');
try clap.help(writer, ¶ms);
std.process.exit(1);
}
const pos_args = args.positionals();
const in_file = pos_args[0];
log.debug("In file: {s}\n", .{in_file});
var out_filename: []const u8 = undefined;
if (args.option("--out")) |out| {
log.debug("Out direct: {s}\n", .{out});
out_filename = out;
} else {
const base = std.fs.path.basename(in_file);
const ext = std.fs.path.extension(base);
const new_ext = ".html";
// will be leaked but we need it till the end anyway
const out_buf = try allocator.alloc(u8, base.len - ext.len + new_ext.len);
std.mem.copy(u8, out_buf, base[0 .. base.len - ext.len]);
std.mem.copy(u8, out_buf[base.len - ext.len ..], new_ext);
out_filename = out_buf;
}
log.debug("Out file: {s}\n", .{out_filename});
var ref_file: ?[]const u8 = null;
if (args.option("--references")) |ref_fn| {
ref_file = ref_fn;
}
var csl_file: ?[]const u8 = null;
if (args.option("--citation-style")) |csl_fn| {
csl_file = csl_fn;
}
var csl_locale: ?[]const u8 = null;
if (args.option("--locale")) |csl_loc| {
csl_locale = csl_loc;
}
var parser: Parser = try Parser.init(allocator, in_file);
defer parser.deinit();
try parser.parse();
// execute code for found languages
var run_lang_iter = parser.run_languages.iterator();
var runners = std.ArrayList(CodeRunner).init(allocator);
defer runners.deinit();
while (run_lang_iter.next()) |lang| {
var code_runner = try runners.addOne();
code_runner.* = try CodeRunner.init(allocator, lang, parser.current_document);
// TODO still go for checking if the exe is available manually?
// unfortunately not possible to switch on error sets yet:
// https://github.com/ziglang/zig/issues/2473
code_runner.run() catch |err| {
log.err("Running {s} code chunks with executable '{s}' failed with error: {s}\n",
.{ @tagName(lang), "TODO", @errorName(err) });
};
}
defer {
for (runners.items) |*runner| {
runner.deinit();
}
}
if (parser.citations.items.len > 0 and (ref_file != null or csl_file != null)) {
if (ref_file != null and csl_file != null and csl_locale != null) {
const write_conversion = args.flag("--write-bib-conversion");
const csl_json_result = try cite.csl_items_from_file(allocator, ref_file.?, write_conversion);
defer csl_json_result.arena.deinit();
const bib_entries = run_citeproc(
&parser.node_arena.allocator, parser.citations.items, csl_json_result.items,
csl_file.?, csl_locale.?) catch |err| blk: {
log.err("Running citeproc failed with error: {s}\n", .{ @errorName(err) });
log.err("Citation processing was aborted!", .{});
break :blk &[_]*ast.Node{};
};
if (parser.bibliography) |bib| {
for (bib_entries) |entry| {
bib.append_child(entry);
}
}
} else {
log.warn(
"Both a references file (BibLaTeX or CSL-JSON) as well as CSL file " ++
"and a locale is needed to process citations!", .{});
}
}
var html_gen = HTMLGenerator.init(allocator, parser.current_document, parser.label_node_map);
var file: std.fs.File = undefined;
if (std.fs.path.isAbsolute(out_filename)) {
file = try std.fs.createFileAbsolute(
out_filename,
.{ .read = true, .truncate = true },
);
} else {
file = try std.fs.cwd().createFile(
out_filename,
// truncate: reduce file to length 0 if it exists
.{ .read = true, .truncate = true },
);
}
defer file.close();
var out = std.io.bufferedWriter(file.writer());
try html_gen.write(@TypeOf(out), out.writer());
try out.flush();
} | src/main.zig |
const std = @import("std");
const assert = std.debug.assert;
const tools = @import("tools");
const SkyMap = tools.Map(u8, 500, 500, true);
const Vec2 = tools.Vec2;
pub fn run(input: []const u8, allocator: std.mem.Allocator) ![2][]const u8 {
const Star = struct { p: Vec2, v: Vec2 };
var stars = std.ArrayList(Star).init(allocator);
defer stars.deinit();
{
var it = std.mem.tokenize(u8, input, "\n\r");
while (it.next()) |line| {
const fields = tools.match_pattern("position=<{}, {}> velocity=<{}, {}>", line) orelse unreachable;
try stars.append(Star{
.p = Vec2{ .x = @intCast(i32, fields[0].imm), .y = @intCast(i32, fields[1].imm) },
.v = Vec2{ .x = @intCast(i32, fields[2].imm), .y = @intCast(i32, fields[3].imm) },
});
}
}
// part1
var mem: [128 * 128]u8 = undefined;
var seconds: u32 = 0;
const ans1 = ans: {
var cur_stars = try allocator.dupe(Star, stars.items);
defer allocator.free(cur_stars);
var sky_map: []const u8 = "";
var sky_size: isize = 999999999;
while (true) {
seconds += 1;
var bbox = tools.BBox.empty;
for (cur_stars) |*it| {
it.p = Vec2.add(it.p, it.v);
bbox.max = Vec2.max(it.p, bbox.max);
bbox.min = Vec2.min(it.p, bbox.min);
}
const size = (try std.math.absInt(bbox.max.x - bbox.min.x)) + (try std.math.absInt(bbox.max.y - bbox.min.y));
if (sky_size >= size) {
sky_size = size;
} else {
//std.debug.print("bbox={}\n", .{bbox});
break;
}
if (size < 100) {
var sky = SkyMap{ .default_tile = ' ' };
sky.fill(' ', bbox);
for (cur_stars) |it| {
sky.set(it.p, '#');
}
sky_map = sky.printToBuf(null, bbox, null, &mem);
// std.debug.print("map=\n{}", .{sky_map});
}
}
break :ans sky_map;
};
// part2
const ans2 = seconds - 1;
return [_][]const u8{
try std.fmt.allocPrint(allocator, "{s}", .{ans1}),
try std.fmt.allocPrint(allocator, "{}", .{ans2}),
};
}
pub const main = tools.defaultMain("2018/input_day10.txt", run); | 2018/day10.zig |
const std = @import("std");
const tools = @import("tools");
const with_trace = false;
fn trace(comptime fmt: []const u8, args: anytype) void {
if (with_trace) std.debug.print(fmt, args);
}
const assert = std.debug.assert;
pub const main = tools.defaultMain("2021/day11.txt", run);
const Vec2 = tools.Vec2;
const Map = tools.Map(u8, 10, 10, false);
pub fn run(input: []const u8, gpa: std.mem.Allocator) tools.RunError![2][]const u8 {
const init = blk: {
var map = Map{ .default_tile = 0 };
var it = std.mem.tokenize(u8, input, "\n");
var p = Vec2{ 0, 0 };
while (it.next()) |line| : (p += Vec2{ 0, 1 }) {
map.setLine(p, std.mem.trim(u8, line, " \t\r\n"));
}
break :blk map;
};
{
var buf: [128]u8 = undefined;
trace("initial: (size={})\n{s}\n", .{ init.bbox.size(), init.printToBuf(&buf, .{}) });
}
const ans = ans: {
var gen: u32 = 1;
var accu_flashes_to100: u32 = 0;
var state = init;
while (true) : (gen += 1) {
state.fillIncrement(1, init.bbox);
var flashed = Map{ .default_tile = 0 };
// do the flashes
{
var flashes: u32 = 0;
flashed.fill(0, init.bbox);
var dirty = true;
while (dirty) {
dirty = false;
var it = state.iter(null);
while (it.nextEx()) |t| {
if (t.t.* > '9' and flashed.at(t.p) == 0) {
dirty = true;
flashed.set(t.p, 1);
flashes += 1;
for (tools.Vec.cardinal8_dirs) |d| {
if (state.get(t.p + d)) |n| {
state.set(t.p + d, n +| 1);
}
}
}
}
}
if (gen <= 100)
accu_flashes_to100 += flashes;
if (flashes == init.bbox.size()) {
trace("all flashes at {}\n", .{gen});
break;
}
}
// consume energy
{
var it = state.iter(null);
while (it.nextEx()) |t| {
if (t.t.* > '9') t.t.* = '0';
}
var buf: [128]u8 = undefined;
trace("gen{}:\n{s}\n", .{ gen, state.printToBuf(&buf, .{}) });
}
}
break :ans [2]u32{ accu_flashes_to100, gen };
};
return [_][]const u8{
try std.fmt.allocPrint(gpa, "{}", .{ans[0]}),
try std.fmt.allocPrint(gpa, "{}", .{ans[1]}),
};
}
test {
const res0 = try run(
\\11111
\\19991
\\19191
\\19991
\\11111
, std.testing.allocator);
defer std.testing.allocator.free(res0[0]);
defer std.testing.allocator.free(res0[1]);
const res = try run(
\\5483143223
\\2745854711
\\5264556173
\\6141336146
\\6357385478
\\4167524645
\\2176841721
\\6882881134
\\4846848554
\\5283751526
, std.testing.allocator);
defer std.testing.allocator.free(res[0]);
defer std.testing.allocator.free(res[1]);
try std.testing.expectEqualStrings("1656", res[0]);
try std.testing.expectEqualStrings("195", res[1]);
} | 2021/day11.zig |
const std = @import("std");
const lola = @import("lola");
////
// Multi-environment communication example:
// In this example, we have three scripts:
// server.lola: Exporting a simple key-value store
// client-a.lola: Writes "Hello, World!" into the store in server
// client-b.lola: Reads the message from the server and prints it
//
// Real world application:
// This shows the inter-environment communication capabilities of LoLa,
// which is useful for games with interactive computer systems that need
// to interact with each other in a user-scriptable way.
//
// Each computer is its own environment, providing a simple script.
// Computers/Environments can communicate via a object interface, exposing
// other computers as a LoLa object and allowing those environments to
// communicate with each other.
//
pub const ObjectPool = lola.runtime.ObjectPool([_]type{
lola.libs.runtime.LoLaDictionary,
lola.libs.runtime.LoLaList,
// Environment is a non-serializable object. If you need to serialize a whole VM state with cross-references,
// provide your own wrapper implementation
lola.runtime.Environment,
});
pub fn main() anyerror!u8 {
var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa_state.deinit();
const allocator = gpa_state.allocator();
var diagnostics = lola.compiler.Diagnostics.init(allocator);
defer {
for (diagnostics.messages.items) |msg| {
std.debug.print("{}\n", .{msg});
}
diagnostics.deinit();
}
var server_unit = (try lola.compiler.compile(allocator, &diagnostics, "server.lola", @embedFile("server.lola"))) orelse return 1;
defer server_unit.deinit();
var client_a_unit = (try lola.compiler.compile(allocator, &diagnostics, "client-a.lola", @embedFile("client-a.lola"))) orelse return 1;
defer client_a_unit.deinit();
var client_b_unit = (try lola.compiler.compile(allocator, &diagnostics, "client-b.lola", @embedFile("client-b.lola"))) orelse return 1;
defer client_b_unit.deinit();
var pool = ObjectPool.init(allocator);
defer pool.deinit();
var server_env = try lola.runtime.Environment.init(allocator, &server_unit, pool.interface());
defer server_env.deinit();
var client_a_env = try lola.runtime.Environment.init(allocator, &client_a_unit, pool.interface());
defer client_a_env.deinit();
var client_b_env = try lola.runtime.Environment.init(allocator, &client_b_unit, pool.interface());
defer client_b_env.deinit();
for ([_]*lola.runtime.Environment{ &server_env, &client_a_env, &client_b_env }) |env| {
try env.installModule(lola.libs.std, lola.runtime.Context.null_pointer);
try env.installModule(lola.libs.runtime, lola.runtime.Context.null_pointer);
}
var server_obj_handle = try pool.createObject(&server_env);
// Important: The environment is stored in the ObjectPool,
// but will be destroyed earlier by us, so we have to remove it
// from the pool before we destroy `server_env`!
defer pool.destroyObject(server_obj_handle);
const getServerFunction = lola.runtime.Function{
.syncUser = .{
.context = lola.runtime.Context.make(*lola.runtime.ObjectHandle, &server_obj_handle),
.call = struct {
fn call(
environment: *lola.runtime.Environment,
context: lola.runtime.Context,
args: []const lola.runtime.Value,
) anyerror!lola.runtime.Value {
_ = environment;
_ = args;
return lola.runtime.Value.initObject(context.cast(*lola.runtime.ObjectHandle).*);
}
}.call,
.destructor = null,
},
};
try client_a_env.installFunction("GetServer", getServerFunction);
try client_b_env.installFunction("GetServer", getServerFunction);
// First, initialize the server and let it initialize `storage`.
{
var vm = try lola.runtime.VM.init(allocator, &server_env);
defer vm.deinit();
const result = try vm.execute(null);
if (result != .completed)
return error.CouldNotCompleteCode;
}
// Then, let Client A execute
{
var vm = try lola.runtime.VM.init(allocator, &client_a_env);
defer vm.deinit();
const result = try vm.execute(null);
if (result != .completed)
return error.CouldNotCompleteCode;
}
// Then, let Client B execute
{
var vm = try lola.runtime.VM.init(allocator, &client_b_env);
defer vm.deinit();
const result = try vm.execute(null);
if (result != .completed)
return error.CouldNotCompleteCode;
}
return 0;
} | examples/host/multi-environment/main.zig |
usingnamespace @import("../Zig-PSP/src/psp/include/pspgu.zig");
usingnamespace @import("../Zig-PSP/src/psp/include/pspdisplay.zig");
usingnamespace @import("../Zig-PSP/src/psp/include/pspctrl.zig");
usingnamespace @import("../Zig-PSP/src/psp/include/pspgum.zig");
const vram = @import("../Zig-PSP/src/psp/utils/vram.zig");
usingnamespace @import("../Zig-PSP/src/psp/utils/constants.zig");
var fbp0: ?*c_void = null;
var fbp1: ?*c_void = null;
var zbp0: ?*c_void = null;
var display_list : [0x20000]u32 align(16) = [_]u32{0} ** 0x20000;
pub fn init() void {
fbp0 = vram.allocVramRelative(SCR_BUF_WIDTH, SCREEN_HEIGHT, GuPixelMode.Psm8888);
    fbp1 = vram.allocVramRelative(SCR_BUF_WIDTH, SCREEN_HEIGHT, GuPixelMode.Psm8888);
zbp0 = vram.allocVramRelative(SCR_BUF_WIDTH, SCREEN_HEIGHT, GuPixelMode.Psm4444);
sceGuInit();
sceGuStart(GuContextType.Direct, @ptrCast(*c_void, &display_list));
sceGuDrawBuffer(GuPixelMode.Psm8888, fbp0, SCR_BUF_WIDTH);
sceGuDispBuffer(SCREEN_WIDTH, SCREEN_HEIGHT, fbp1, SCR_BUF_WIDTH);
sceGuDepthBuffer(zbp0, SCR_BUF_WIDTH);
sceGuOffset(2048 - SCREEN_WIDTH/2, 2048 - SCREEN_HEIGHT/2);
sceGuViewport(2048, 2048, SCREEN_WIDTH, SCREEN_HEIGHT);
sceGuDepthRange(65535, 0);
sceGuScissor(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT);
sceGuEnable(GuState.ScissorTest);
sceGuDepthFunc(DepthFunc.GreaterOrEqual);
sceGuEnable(GuState.DepthTest);
sceGuDisable(GuState.Texture2D);
sceGuEnable(GuState.ClipPlanes);
sceGuEnable(GuState.CullFace);
sceGuFrontFace(FrontFaceDirection.CounterClockwise);
sceGuEnable(GuState.Blend);
sceGuBlendFunc(BlendOp.Add, BlendArg.SrcAlpha, BlendArg.OneMinusSrcAlpha, 0, 0);
sceGuAlphaFunc(AlphaFunc.Greater, 0.0, 0xff);
sceGuStencilFunc(StencilFunc.Always, 1, 1);
sceGuStencilOp(StencilOperation.Keep, StencilOperation.Keep, StencilOperation.Replace);
sceGuTexFilter(TextureFilter.Linear, TextureFilter.Linear);
sceGuShadeModel(ShadeModel.Smooth);
sceGuEnable(GuState.Texture2D);
guFinish();
displayWaitVblankStart();
sceGuDisplay(true);
_ = sceCtrlSetSamplingCycle(0);
_ = ctrlSetSamplingMode(PspCtrlMode.Analog);
}
pub fn deinit() void {
sceGuTerm();
}
pub fn recordCommands() void {
sceGuStart(GuContextType.Direct, @ptrCast(*c_void, &display_list));
}
var clearColor : u32 = 0xff000000;
pub fn setClearColor(r: u8, g: u8, b: u8, a: u8) void {
clearColor = rgba(r, g, b, a);
}
pub fn set2D() void{
sceGumMatrixMode(MatrixMode.Projection);
sceGumOrtho(0, 480, 272, 0, -16, 15);
sceGumMatrixMode(MatrixMode.View);
sceGumLoadIdentity();
sceGumMatrixMode(MatrixMode.Model);
sceGumLoadIdentity();
}
pub fn clear() void {
sceGuClearColor(clearColor);
sceGuClear(@enumToInt(ClearBitFlags.ColorBuffer) + @enumToInt(ClearBitFlags.DepthBuffer) + @enumToInt(ClearBitFlags.StencilBuffer));
sceGuClearDepth(0);
}
pub fn submitCommands() void {
guFinish();
_ = sceGuSync(GuSyncMode.Finish, GuSyncBehavior.Wait);
_ = sceGuSwapBuffers();
displayWaitVblankStart();
} | src/gfx/renderer.zig |
const std = @import("../../std.zig");
const net = @import("net.zig");
const os = std.os;
const fmt = std.fmt;
const mem = std.mem;
const time = std.time;
const meta = std.meta;
const native_os = std.Target.current.os;
const native_endian = std.Target.current.cpu.arch.endian();
const Buffer = std.x.os.Buffer;
const assert = std.debug.assert;
/// A generic, cross-platform socket abstraction.
pub const Socket = struct {
/// A socket-address pair.
pub const Connection = struct {
socket: Socket,
address: Socket.Address,
/// Enclose a socket and address into a socket-address pair.
pub fn from(socket: Socket, address: Socket.Address) Socket.Connection {
return .{ .socket = socket, .address = address };
}
};
/// A generic socket address abstraction. It is safe to directly access and modify
/// the fields of a `Socket.Address`.
pub const Address = union(enum) {
pub const Native = struct {
pub const requires_prepended_length = native_os.getVersionRange() == .semver;
pub const Length = if (requires_prepended_length) u8 else [0]u8;
pub const Family = if (requires_prepended_length) u8 else c_ushort;
/// POSIX `sockaddr_storage`. The expected size and alignment is specified in IETF RFC 2553.
pub const Storage = extern struct {
pub const expected_size = 128;
pub const expected_alignment = 8;
pub const padding_size = expected_size -
mem.alignForward(@sizeOf(Address.Native.Length), expected_alignment) -
mem.alignForward(@sizeOf(Address.Native.Family), expected_alignment);
len: Address.Native.Length align(expected_alignment) = undefined,
family: Address.Native.Family align(expected_alignment) = undefined,
padding: [padding_size]u8 align(expected_alignment) = undefined,
comptime {
assert(@sizeOf(Storage) == Storage.expected_size);
assert(@alignOf(Storage) == Storage.expected_alignment);
}
};
};
ipv4: net.IPv4.Address,
ipv6: net.IPv6.Address,
/// Instantiate a new address with an IPv4 host and port.
pub fn initIPv4(host: net.IPv4, port: u16) Socket.Address {
return .{ .ipv4 = .{ .host = host, .port = port } };
}
/// Instantiate a new address with an IPv6 host and port.
pub fn initIPv6(host: net.IPv6, port: u16) Socket.Address {
return .{ .ipv6 = .{ .host = host, .port = port } };
}
/// Parses a `sockaddr` into a generic socket address.
pub fn fromNative(address: *align(4) const os.sockaddr) Socket.Address {
switch (address.family) {
os.AF_INET => {
const info = @ptrCast(*const os.sockaddr_in, address);
const host = net.IPv4{ .octets = @bitCast([4]u8, info.addr) };
const port = mem.bigToNative(u16, info.port);
return Socket.Address.initIPv4(host, port);
},
os.AF_INET6 => {
const info = @ptrCast(*const os.sockaddr_in6, address);
const host = net.IPv6{ .octets = info.addr, .scope_id = info.scope_id };
const port = mem.bigToNative(u16, info.port);
return Socket.Address.initIPv6(host, port);
},
else => unreachable,
}
}
/// Encodes a generic socket address into an extern union that may be reliably
/// cast into a `sockaddr` which may be passed into socket syscalls.
pub fn toNative(self: Socket.Address) extern union {
ipv4: os.sockaddr_in,
ipv6: os.sockaddr_in6,
} {
return switch (self) {
.ipv4 => |address| .{
.ipv4 = .{
.addr = @bitCast(u32, address.host.octets),
.port = mem.nativeToBig(u16, address.port),
},
},
.ipv6 => |address| .{
.ipv6 = .{
.addr = address.host.octets,
.port = mem.nativeToBig(u16, address.port),
.scope_id = address.host.scope_id,
.flowinfo = 0,
},
},
};
}
/// Returns the number of bytes that make up the `sockaddr` equivalent to the address.
pub fn getNativeSize(self: Socket.Address) u32 {
return switch (self) {
.ipv4 => @sizeOf(os.sockaddr_in),
.ipv6 => @sizeOf(os.sockaddr_in6),
};
}
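// Example (a sketch): passing an address to a socket syscall through its
// native form. `sock` is a hypothetical already-created socket, and the
// `localhost` constant on `net.IPv4` is an assumption:
//
// const address = Socket.Address.initIPv4(net.IPv4.localhost, 8080);
// var native = address.toNative();
// try os.connect(sock.fd, @ptrCast(*const os.sockaddr, &native), address.getNativeSize());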
/// Implements the `std.fmt.format` API.
pub fn format(
self: Socket.Address,
comptime layout: []const u8,
opts: fmt.FormatOptions,
writer: anytype,
) !void {
_ = opts;
_ = layout;
switch (self) {
.ipv4 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
.ipv6 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
}
}
};
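// Example (a sketch): constructing an address and printing it via the
// `format` implementation above (assumes an IPv4 host value `host` obtained
// elsewhere):
//
// const address = Socket.Address.initIPv4(host, 80);
// std.debug.print("listening on {}\n", .{address});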
/// POSIX `msghdr`. Denotes a destination address, set of buffers, control data, and flags. Ported
/// directly from musl.
pub const Message = if (native_os.isAtLeast(.windows, .vista) orelse false)
extern struct {
name: usize = @ptrToInt(@as(?[*]u8, null)),
name_len: c_int = 0,
buffers: usize = undefined,
buffers_len: c_ulong = undefined,
control: Buffer = .{
.ptr = @ptrToInt(@as(?[*]u8, null)),
.len = 0,
},
flags: c_ulong = 0,
pub usingnamespace MessageMixin(Message);
}
else if (native_os.tag == .windows)
extern struct {
name: usize = @ptrToInt(@as(?[*]u8, null)),
name_len: c_int = 0,
buffers: usize = undefined,
buffers_len: u32 = undefined,
control: Buffer = .{
.ptr = @ptrToInt(@as(?[*]u8, null)),
.len = 0,
},
flags: u32 = 0,
pub usingnamespace MessageMixin(Message);
}
else if (@sizeOf(usize) > 4 and native_endian == .Big)
extern struct {
name: usize = @ptrToInt(@as(?[*]u8, null)),
name_len: c_uint = 0,
buffers: usize = undefined,
_pad_1: c_int = 0,
buffers_len: c_int = undefined,
control: usize = @ptrToInt(@as(?[*]u8, null)),
_pad_2: c_int = 0,
control_len: c_uint = 0,
flags: c_int = 0,
pub usingnamespace MessageMixin(Message);
}
else if (@sizeOf(usize) > 4 and native_endian == .Little)
extern struct {
name: usize = @ptrToInt(@as(?[*]u8, null)),
name_len: c_uint = 0,
buffers: usize = undefined,
buffers_len: c_int = undefined,
_pad_1: c_int = 0,
control: usize = @ptrToInt(@as(?[*]u8, null)),
control_len: c_uint = 0,
_pad_2: c_int = 0,
flags: c_int = 0,
pub usingnamespace MessageMixin(Message);
}
else
extern struct {
name: usize = @ptrToInt(@as(?[*]u8, null)),
name_len: c_uint = 0,
buffers: usize = undefined,
buffers_len: c_int = undefined,
control: usize = @ptrToInt(@as(?[*]u8, null)),
control_len: c_uint = 0,
flags: c_int = 0,
pub usingnamespace MessageMixin(Message);
};
fn MessageMixin(comptime Self: type) type {
return struct {
pub fn fromBuffers(buffers: []const Buffer) Self {
var self: Self = .{};
self.setBuffers(buffers);
return self;
}
pub fn setName(self: *Self, name: []const u8) void {
self.name = @ptrToInt(name.ptr);
self.name_len = @intCast(meta.fieldInfo(Self, .name_len).field_type, name.len);
}
pub fn setBuffers(self: *Self, buffers: []const Buffer) void {
self.buffers = @ptrToInt(buffers.ptr);
self.buffers_len = @intCast(meta.fieldInfo(Self, .buffers_len).field_type, buffers.len);
}
pub fn setControl(self: *Self, control: []const u8) void {
if (native_os.tag == .windows) {
self.control = Buffer.from(control);
} else {
self.control = @ptrToInt(control.ptr);
self.control_len = @intCast(meta.fieldInfo(Self, .control_len).field_type, control.len);
}
}
pub fn setFlags(self: *Self, flags: u32) void {
self.flags = @intCast(meta.fieldInfo(Self, .flags).field_type, flags);
}
pub fn getName(self: Self) []const u8 {
return @intToPtr([*]const u8, self.name)[0..@intCast(usize, self.name_len)];
}
pub fn getBuffers(self: Self) []const Buffer {
return @intToPtr([*]const Buffer, self.buffers)[0..@intCast(usize, self.buffers_len)];
}
pub fn getControl(self: Self) []const u8 {
if (native_os.tag == .windows) {
return self.control.into();
} else {
return @intToPtr([*]const u8, self.control)[0..@intCast(usize, self.control_len)];
}
}
pub fn getFlags(self: Self) u32 {
return @intCast(u32, self.flags);
}
};
}
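// Example (a sketch): building a message that gathers two buffers, assuming
// `a` and `b` are byte slices provided by the caller:
//
// const buffers = [_]Buffer{ Buffer.from(a), Buffer.from(b) };
// var message = Socket.Message.fromBuffers(&buffers);
// message.setFlags(0);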
/// POSIX `linger`, denoting the linger settings of a socket.
///
/// Microsoft's documentation denotes the fields to be unsigned shorts on
/// Windows, whereas glibc and musl denote the fields to be ints on every
/// other platform.
pub const Linger = extern struct {
pub const Field = switch (native_os.tag) {
.windows => c_ushort,
else => c_int,
};
enabled: Field,
timeout_seconds: Field,
pub fn init(timeout_seconds: ?u16) Socket.Linger {
return .{
.enabled = @intCast(Socket.Linger.Field, @boolToInt(timeout_seconds != null)),
.timeout_seconds = if (timeout_seconds) |seconds| @intCast(Socket.Linger.Field, seconds) else 0,
};
}
};
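// Example (a sketch): `Socket.Linger.init(5)` yields an enabled linger of
// five seconds, while `Socket.Linger.init(null)` yields a disabled linger.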
/// Possible set of flags to initialize a socket with.
pub const InitFlags = enum {
/// Initialize a socket to be non-blocking.
nonblocking,
/// Have a socket close itself on exec syscalls.
close_on_exec,
};
/// The underlying handle of a socket.
fd: os.socket_t,
/// Enclose a socket abstraction over an existing socket file descriptor.
pub fn from(fd: os.socket_t) Socket {
return Socket{ .fd = fd };
}
/// Mix in socket syscalls depending on the platform we are compiling against.
pub usingnamespace switch (native_os.tag) {
.windows => @import("socket_windows.zig"),
else => @import("socket_posix.zig"),
}.Mixin(Socket);
}; | lib/std/x/os/socket.zig |
const std = @import("std");
const lpc1768 = @import("lpc1768");
const ISRHandler = fn () callconv(.C) void;
const start_of_ram = 0x10000000;
const stack_size = 0x2000;
const initial_sp = start_of_ram + stack_size;
var mutable_vector_table: @TypeOf(fixed_vector_table) = undefined;
extern var __bss__start: c_void;
extern var __bss__end: c_void;
extern var __text__end: c_void;
extern var __data__start: c_void;
extern var __data__end: c_void;
export fn _start() callconv(.C) noreturn {
mutable_vector_table = fixed_vector_table;
lpc1768.SCB.VTOR = @ptrToInt(&mutable_vector_table);
lpc1768.SCB.SHP[7] = 1; // SVC has less priority than all fault handlers
lpc1768.SCB.SHCSR = 0x00070000; // enable mem, bus, and usage fault handlers
const bss = @ptrCast([*]u8, &__bss__start)[0 .. @ptrToInt(&__bss__end) - @ptrToInt(&__bss__start)];
const ro_data = @ptrCast([*]const u8, &__text__end)[0 .. @ptrToInt(&__data__end) - @ptrToInt(&__data__start)];
const rw_data = @ptrCast([*]u8, &__data__start)[0..ro_data.len];
// Zero the BSS segment.
std.mem.set(u8, bss, 0);
// Copy the data segment from flash into RAM.
std.mem.copy(u8, rw_data, ro_data);
@import("root").main() catch |err| {
@panic(@errorName(err));
};
while (true) {
lpc1768.__disable_irq();
lpc1768.__disable_fault_irq();
lpc1768.__WFE();
}
}
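// Example (a sketch): because the vector table is copied to RAM and VTOR is
// repointed above, individual handlers can be replaced at runtime.
// `myTimerIsr` is a hypothetical function with the ISRHandler signature:
//
// mutable_vector_table.timer0 = myTimerIsr;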
export fn _nmi() callconv(.C) void {
@panic("nmi");
}
export fn _hardFault() callconv(.C) void {
@panic("hard fault");
}
export fn _mpuFault() callconv(.C) void {
@panic("mpu fault");
}
export fn _busFault() callconv(.C) void {
@panic("bus fault");
}
export fn _usageFault() callconv(.C) void {
@panic("usage fault");
}
export fn _unhandledInterrupt() callconv(.C) void {
@panic("Unhandled interrupt!");
}
comptime {
_ = fixed_vector_table;
}
const VectorTable = extern struct {
initial_stack_pointer: u32 = initial_sp,
reset: ISRHandler = _start,
nmi: ISRHandler = _nmi,
hard_fault: ISRHandler = _hardFault,
mpu_fault: ISRHandler = _mpuFault,
bus_fault: ISRHandler = _busFault,
usage_fault: ISRHandler = _usageFault,
checksum: u32 = undefined,
reserved0: u32 = 0,
reserved1: u32 = 0,
reserved2: u32 = 0,
svcall: ISRHandler = _unhandledInterrupt,
debug_monitor: ISRHandler = _unhandledInterrupt,
reserved3: u32 = 0,
pendsv: ISRHandler = _unhandledInterrupt,
systick: ISRHandler = _unhandledInterrupt,
wdt: ISRHandler = _unhandledInterrupt,
timer0: ISRHandler = _unhandledInterrupt,
timer1: ISRHandler = _unhandledInterrupt,
timer2: ISRHandler = _unhandledInterrupt,
timer3: ISRHandler = _unhandledInterrupt,
uart0: ISRHandler = _unhandledInterrupt,
uart1: ISRHandler = _unhandledInterrupt,
uart2: ISRHandler = _unhandledInterrupt,
uart3: ISRHandler = _unhandledInterrupt,
pwm1: ISRHandler = _unhandledInterrupt,
i2c0: ISRHandler = _unhandledInterrupt,
i2c1: ISRHandler = _unhandledInterrupt,
i2c2: ISRHandler = _unhandledInterrupt,
spi: ISRHandler = _unhandledInterrupt,
ssp0: ISRHandler = _unhandledInterrupt,
ssp1: ISRHandler = _unhandledInterrupt,
pll0: ISRHandler = _unhandledInterrupt,
rtc: ISRHandler = _unhandledInterrupt,
eint0: ISRHandler = _unhandledInterrupt,
eint1: ISRHandler = _unhandledInterrupt,
eint2: ISRHandler = _unhandledInterrupt,
eint3: ISRHandler = _unhandledInterrupt,
adc: ISRHandler = _unhandledInterrupt,
bod: ISRHandler = _unhandledInterrupt,
usb: ISRHandler = _unhandledInterrupt,
can: ISRHandler = _unhandledInterrupt,
dma: ISRHandler = _unhandledInterrupt,
i2s: ISRHandler = _unhandledInterrupt,
enet: ISRHandler = _unhandledInterrupt,
rit: ISRHandler = _unhandledInterrupt,
mcpwm: ISRHandler = _unhandledInterrupt,
qei: ISRHandler = _unhandledInterrupt,
pll1: ISRHandler = _unhandledInterrupt,
};
export const fixed_vector_table: VectorTable linksection(".isr_vector") = VectorTable{}; | research-chamber/src/boot.zig |
const x86_64 = @import("../index.zig");
const bitjuggle = @import("bitjuggle");
const std = @import("std");
/// Returns the current value of the code segment register.
pub fn getCs() x86_64.structures.gdt.SegmentSelector {
return .{
.value = asm ("mov %%cs, %[ret]"
: [ret] "=r" (-> u16),
),
};
}
/// Reload code segment register.
///
/// The segment base and limit are unused in 64-bit mode. Only the L (long), D
/// (default operation size), and DPL (descriptor privilege-level) fields of the
/// descriptor are recognized. So changing the segment register can be used to
/// change privilege level or enable/disable long mode.
///
/// Note this is special since we cannot directly move to [`CS`]. Instead we
/// push the new segment selector and a return address on the stack and use
/// a far return (`lretq`) to reload [`CS`] and continue at the end of our function.
pub fn setCs(sel: x86_64.structures.gdt.SegmentSelector) void {
asm volatile ("pushq %[sel]; leaq 1f(%%rip), %%rax; pushq %%rax; lretq; 1:"
:
: [sel] "ri" (@as(u64, sel.value)),
: "rax", "memory"
);
}
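// Example (a sketch): reloading the code segment after installing a new GDT;
// `kernel_code_selector` is a hypothetical selector produced by the GDT setup:
//
// setCs(kernel_code_selector);
// assert(getCs().value == kernel_code_selector.value);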
/// Returns the current value of the stack segment register.
pub fn getSs() x86_64.structures.gdt.SegmentSelector {
return .{
.value = asm ("mov %%ss, %[ret]"
: [ret] "=r" (-> u16),
),
};
}
/// Reload stack segment register.
///
/// Entirely unused in 64-bit mode; setting the segment register does nothing.
/// However, in ring 3, the SS register still has to point to a valid
/// [`Descriptor`] (it cannot be zero). This means a user-mode read/write
/// segment descriptor must be present in the GDT.
///
/// This register is also set by the `syscall`/`sysret` and
/// `sysenter`/`sysexit` instructions (even on 64-bit transitions). This is to
/// maintain symmetry with 32-bit transitions where setting SS actually does
/// have an effect.
pub fn setSs(sel: x86_64.structures.gdt.SegmentSelector) void {
asm volatile ("movw %[sel], %%ss"
:
: [sel] "r" (sel.value),
: "memory"
);
}
/// Returns the current value of the data segment register.
pub fn getDs() x86_64.structures.gdt.SegmentSelector {
return .{
.value = asm ("mov %%ds, %[ret]"
: [ret] "=r" (-> u16),
),
};
}
/// Reload data segment register.
///
/// Entirely unused in 64-bit mode; setting the segment register does nothing.
pub fn setDs(sel: x86_64.structures.gdt.SegmentSelector) void {
asm volatile ("movw %[sel], %%ds"
:
: [sel] "r" (sel.value),
: "memory"
);
}
/// Returns the current value of the es segment register.
pub fn getEs() x86_64.structures.gdt.SegmentSelector {
return .{
.value = asm ("mov %%es, %[ret]"
: [ret] "=r" (-> u16),
),
};
}
/// Reload es segment register.
///
/// Entirely unused in 64-bit mode; setting the segment register does nothing.
pub fn setEs(sel: x86_64.structures.gdt.SegmentSelector) void {
asm volatile ("movw %[sel], %%es"
:
: [sel] "r" (sel.value),
: "memory"
);
}
/// Returns the current value of the fs segment register.
pub fn getFs() x86_64.structures.gdt.SegmentSelector {
return .{
.value = asm ("mov %%fs, %[ret]"
: [ret] "=r" (-> u16),
),
};
}
/// Reload fs segment register.
pub fn setFs(sel: x86_64.structures.gdt.SegmentSelector) void {
asm volatile ("movw %[sel], %%fs"
:
: [sel] "r" (sel.value),
: "memory"
);
}
/// Returns the current value of the gs segment register.
pub fn getGs() x86_64.structures.gdt.SegmentSelector {
return .{
.value = asm ("mov %%gs, %[ret]"
: [ret] "=r" (-> u16),
),
};
}
/// Reload gs segment register.
pub fn setGs(sel: x86_64.structures.gdt.SegmentSelector) void {
asm volatile ("movw %[sel], %%gs"
:
: [sel] "r" (sel.value),
: "memory"
);
}
/// Swap `KernelGsBase` MSR and `GsBase` MSR.
pub fn swapGs() void {
asm volatile ("swapgs" ::: "memory");
}
/// Reads the fs segment base address
///
/// ## Exceptions
///
/// If `CR4.fsgsbase` is not set, this instruction will throw an `#UD`.
pub fn readFsBase() u64 {
return asm ("rdfsbase %[ret]"
: [ret] "=r" (-> u64),
);
}
/// Writes the fs segment base address
///
/// ## Exceptions
///
/// If `CR4.fsgsbase` is not set, this instruction will throw an `#UD`.
///
/// The caller must ensure that this write operation has no unsafe side
/// effects, as the fs segment base address is often used for thread
/// local storage.
pub fn writeFsBase(value: u64) void {
asm volatile ("wrfsbase %[val]"
:
: [val] "r" (value),
);
}
/// Reads the gs segment base address
///
/// ## Exceptions
///
/// If `CR4.fsgsbase` is not set, this instruction will throw an `#UD`.
pub fn readGsBase() u64 {
return asm ("rdgsbase %[ret]"
: [ret] "=r" (-> u64),
);
}
/// Writes the gs segment base address
///
/// ## Exceptions
///
/// If `CR4.fsgsbase` is not set, this instruction will throw an `#UD`.
///
/// The caller must ensure that this write operation has no unsafe side
/// effects, as the gs segment base address might be in use.
pub fn writeGsBase(value: u64) void {
asm volatile ("wrgsbase %[val]"
:
: [val] "r" (value),
);
}
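// Example (a sketch): stashing a per-CPU data pointer in the GS base,
// assuming CR4.fsgsbase has been enabled and that `cpu_local` and `CpuLocal`
// are a hypothetical per-CPU variable and its type:
//
// writeGsBase(@ptrToInt(&cpu_local));
// const cpu = @intToPtr(*CpuLocal, readGsBase());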
comptime {
std.testing.refAllDecls(@This());
} | src/instructions/segmentation.zig |
const builtin = @import("builtin");
const std = @import("std");
const math = std.math;
const assert = std.debug.assert;
const L = std.unicode.utf8ToUtf16LeStringLiteral;
const zwin32 = @import("zwin32");
const w32 = zwin32.base;
const d3d12 = zwin32.d3d12;
const hrPanic = zwin32.hrPanic;
const hrPanicOnFail = zwin32.hrPanicOnFail;
const zd3d12 = @import("zd3d12");
const common = @import("common");
const c = common.c;
const vm = common.vectormath;
const GuiRenderer = common.GuiRenderer;
const zb = @cImport(@cInclude("cbullet.h"));
const zmesh = @import("zmesh");
const Vec3 = vm.Vec3;
const Vec4 = vm.Vec4;
const Mat4 = vm.Mat4;
pub export const D3D12SDKVersion: u32 = 4;
pub export const D3D12SDKPath: [*:0]const u8 = ".\\d3d12\\";
const content_dir = @import("build_options").content_dir;
const window_name = "zig-gamedev: bullet physics test";
const window_width = 1920;
const window_height = 1080;
const num_msaa_samples = 4;
const camera_fovy: f32 = math.pi / @as(f32, 3.0);
const default_linear_damping: f32 = 0.1;
const default_angular_damping: f32 = 0.1;
const default_world_friction: f32 = 0.15;
const BodyWithPivot = struct {
body: zb.CbtBodyHandle,
pivot: Vec3,
};
const Scene = enum {
scene1,
scene2,
scene3,
scene4,
};
const PhysicsObjectsPool = struct {
const max_num_bodies = 2 * 1024;
const max_num_constraints = 54;
const max_num_shapes = 48;
bodies: []zb.CbtBodyHandle,
constraints: []zb.CbtConstraintHandle,
shapes: []zb.CbtShapeHandle,
fn init() PhysicsObjectsPool {
const mem = std.heap.page_allocator.alloc(
zb.CbtBodyHandle,
max_num_bodies + max_num_constraints + max_num_shapes,
) catch unreachable;
const bodies = mem[0..max_num_bodies];
const constraints = @ptrCast([*]zb.CbtConstraintHandle, mem.ptr)[max_num_bodies .. max_num_bodies +
max_num_constraints];
const shapes = @ptrCast([*]zb.CbtShapeHandle, mem.ptr)[max_num_bodies + max_num_constraints .. max_num_bodies +
max_num_constraints + max_num_shapes];
var pool = PhysicsObjectsPool{
.bodies = bodies,
.constraints = constraints,
.shapes = shapes,
};
// Bodies
zb.cbtBodyAllocateBatch(max_num_bodies, pool.bodies.ptr);
// Constraints
{
var counter: u32 = 0;
var i: u32 = 0;
while (i < 32) : (i += 1) {
pool.constraints[counter] = zb.cbtConAllocate(zb.CBT_CONSTRAINT_TYPE_POINT2POINT);
counter += 1;
}
i = 0;
while (i < 3) : (i += 1) {
pool.constraints[counter] = zb.cbtConAllocate(zb.CBT_CONSTRAINT_TYPE_GEAR);
counter += 1;
}
i = 0;
while (i < 8) : (i += 1) {
pool.constraints[counter] = zb.cbtConAllocate(zb.CBT_CONSTRAINT_TYPE_HINGE);
counter += 1;
}
i = 0;
while (i < 8) : (i += 1) {
pool.constraints[counter] = zb.cbtConAllocate(zb.CBT_CONSTRAINT_TYPE_SLIDER);
counter += 1;
}
i = 0;
while (i < 3) : (i += 1) {
pool.constraints[counter] = zb.cbtConAllocate(zb.CBT_CONSTRAINT_TYPE_CONETWIST);
counter += 1;
}
assert(counter == max_num_constraints);
}
// Shapes
{
var counter: u32 = 0;
var i: u32 = 0;
while (i < 8) : (i += 1) {
pool.shapes[counter] = zb.cbtShapeAllocate(zb.CBT_SHAPE_TYPE_SPHERE);
counter += 1;
}
i = 0;
while (i < 8) : (i += 1) {
pool.shapes[counter] = zb.cbtShapeAllocate(zb.CBT_SHAPE_TYPE_BOX);
counter += 1;
}
i = 0;
while (i < 8) : (i += 1) {
pool.shapes[counter] = zb.cbtShapeAllocate(zb.CBT_SHAPE_TYPE_COMPOUND);
counter += 1;
}
i = 0;
while (i < 8) : (i += 1) {
pool.shapes[counter] = zb.cbtShapeAllocate(zb.CBT_SHAPE_TYPE_TRIANGLE_MESH);
counter += 1;
}
i = 0;
while (i < 8) : (i += 1) {
pool.shapes[counter] = zb.cbtShapeAllocate(zb.CBT_SHAPE_TYPE_CYLINDER);
counter += 1;
}
i = 0;
while (i < 4) : (i += 1) {
pool.shapes[counter] = zb.cbtShapeAllocate(zb.CBT_SHAPE_TYPE_CAPSULE);
counter += 1;
}
i = 0;
while (i < 4) : (i += 1) {
pool.shapes[counter] = zb.cbtShapeAllocate(zb.CBT_SHAPE_TYPE_CONE);
counter += 1;
}
assert(counter == max_num_shapes);
}
return pool;
}
fn deinit(pool: *PhysicsObjectsPool, world: zb.CbtWorldHandle) void {
pool.destroyAllObjects(world);
zb.cbtBodyDeallocateBatch(@intCast(u32, pool.bodies.len), pool.bodies.ptr);
for (pool.constraints) |con| {
zb.cbtConDeallocate(con);
}
for (pool.shapes) |shape| {
zb.cbtShapeDeallocate(shape);
}
std.heap.page_allocator.free(pool.bodies);
pool.* = undefined;
}
fn getBody(pool: PhysicsObjectsPool) zb.CbtBodyHandle {
for (pool.bodies) |body| {
if (!zb.cbtBodyIsCreated(body)) {
return body;
}
}
unreachable;
}
fn getConstraint(pool: PhysicsObjectsPool, con_type: i32) zb.CbtConstraintHandle {
for (pool.constraints) |con| {
if (!zb.cbtConIsCreated(con) and zb.cbtConGetType(con) == con_type) {
return con;
}
}
unreachable;
}
fn getShape(pool: PhysicsObjectsPool, shape_type: i32) zb.CbtShapeHandle {
for (pool.shapes) |shape| {
if (!zb.cbtShapeIsCreated(shape) and zb.cbtShapeGetType(shape) == shape_type) {
return shape;
}
}
unreachable;
}
fn destroyAllObjects(pool: PhysicsObjectsPool, world: zb.CbtWorldHandle) void {
{
var i = zb.cbtWorldGetNumConstraints(world) - 1;
while (i >= 0) : (i -= 1) {
const constraint = zb.cbtWorldGetConstraint(world, i);
zb.cbtWorldRemoveConstraint(world, constraint);
zb.cbtConDestroy(constraint);
}
}
{
var i = zb.cbtWorldGetNumBodies(world) - 1;
while (i >= 0) : (i -= 1) {
const body = zb.cbtWorldGetBody(world, i);
zb.cbtWorldRemoveBody(world, body);
zb.cbtBodyDestroy(body);
}
}
for (pool.shapes) |shape| {
if (zb.cbtShapeIsCreated(shape)) {
if (zb.cbtShapeGetType(shape) == zb.CBT_SHAPE_TYPE_TRIANGLE_MESH) {
zb.cbtShapeTriMeshDestroy(shape);
} else {
zb.cbtShapeDestroy(shape);
}
}
}
}
};
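// Example (a sketch): acquiring a pooled body and creating it at the origin
// with the shared unit-sphere shape, then registering it with the world; the
// `world`, `entities`, and `shape_sphere_r1` values are as in the
// createScene* helpers below:
//
// const body = physics_objects_pool.getBody();
// zb.cbtBodyCreate(body, 1.0, &Mat4.initIdentity().toArray4x3(), shape_sphere_r1);
// createAddEntity(world, body, Vec4.init(1.0, 1.0, 1.0, 0.5), entities);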
const Vertex = struct {
position: [3]f32,
normal: [3]f32,
};
const Mesh = struct {
index_offset: u32,
vertex_offset: u32,
num_indices: u32,
num_vertices: u32,
};
const mesh_cube: u16 = 0;
const mesh_sphere: u16 = 1;
const mesh_capsule: u16 = 2;
const mesh_cylinder: u16 = 3;
const mesh_cone: u16 = 4;
const mesh_world: u16 = 5;
const mesh_compound: u16 = 0xffff;
fn loadAllMeshes(
all_meshes: *std.ArrayList(Mesh),
all_positions: *std.ArrayList([3]f32),
all_normals: *std.ArrayList([3]f32),
all_indices: *std.ArrayList(u32),
) void {
const paths = [_][:0]const u8{
content_dir ++ "cube.gltf",
content_dir ++ "sphere.gltf",
content_dir ++ "capsule.gltf",
content_dir ++ "cylinder.gltf",
content_dir ++ "cone.gltf",
content_dir ++ "world.gltf",
};
for (paths) |path| {
const pre_indices_len = all_indices.items.len;
const pre_positions_len = all_positions.items.len;
const data = zmesh.gltf.parseAndLoadFile(path) catch unreachable;
defer zmesh.gltf.freeData(data);
zmesh.gltf.appendMeshPrimitive(data, 0, 0, all_indices, all_positions, all_normals, null, null);
all_meshes.append(.{
.index_offset = @intCast(u32, pre_indices_len),
.vertex_offset = @intCast(u32, pre_positions_len),
.num_indices = @intCast(u32, all_indices.items.len - pre_indices_len),
.num_vertices = @intCast(u32, all_positions.items.len - pre_positions_len),
}) catch unreachable;
}
}
const Entity = extern struct {
body: zb.CbtBodyHandle,
base_color_roughness: Vec4,
size: Vec3,
flags: u16 = 0,
mesh_index: u16,
};
const Camera = struct {
position: Vec3,
forward: Vec3,
pitch: f32,
yaw: f32,
};
const DemoState = struct {
gctx: zd3d12.GraphicsContext,
guir: GuiRenderer,
frame_stats: common.FrameStats,
physics_debug_pso: zd3d12.PipelineHandle,
simple_entity_pso: zd3d12.PipelineHandle,
color_texture: zd3d12.ResourceHandle,
depth_texture: zd3d12.ResourceHandle,
color_texture_rtv: d3d12.CPU_DESCRIPTOR_HANDLE,
depth_texture_dsv: d3d12.CPU_DESCRIPTOR_HANDLE,
vertex_buffer: zd3d12.ResourceHandle,
index_buffer: zd3d12.ResourceHandle,
physics_debug: *PhysicsDebug,
physics_world: zb.CbtWorldHandle,
physics_objects_pool: PhysicsObjectsPool,
entities: std.ArrayList(Entity),
meshes: std.ArrayList(Mesh),
connected_bodies: std.ArrayList(BodyWithPivot),
motors: std.ArrayList(zb.CbtConstraintHandle),
current_scene_index: i32,
selected_entity_index: u32,
keyboard_delay: f32,
simulation_is_paused: bool,
do_simulation_step: bool,
camera: Camera,
mouse: struct {
cursor_prev_x: i32,
cursor_prev_y: i32,
},
pick: struct {
body: zb.CbtBodyHandle,
constraint: zb.CbtConstraintHandle,
saved_linear_damping: f32,
saved_angular_damping: f32,
distance: f32,
},
};
const PsoPhysicsDebug_Vertex = extern struct {
position: [3]f32,
color: u32,
};
const PsoPhysicsDebug_FrameConst = extern struct {
world_to_clip: Mat4,
};
const PsoSimpleEntity_DrawConst = extern struct {
object_to_world: Mat4,
base_color_roughness: Vec4,
flags: u32,
padding: [3]u32 = undefined,
};
const PsoSimpleEntity_FrameConst = extern struct {
world_to_clip: Mat4,
camera_position: Vec3,
};
const PhysicsDebug = struct {
lines: std.ArrayList(PsoPhysicsDebug_Vertex),
mode: i32 = 0,
fn init(alloc: std.mem.Allocator) PhysicsDebug {
return .{ .lines = std.ArrayList(PsoPhysicsDebug_Vertex).init(alloc) };
}
fn deinit(debug: *PhysicsDebug) void {
debug.lines.deinit();
debug.* = undefined;
}
fn drawLine1(
context: ?*anyopaque,
p0: [*c]const f32,
p1: [*c]const f32,
color: [*c]const f32,
) callconv(.C) void {
const debug = @ptrCast(*PhysicsDebug, @alignCast(@alignOf(PhysicsDebug), context.?));
const r = @floatToInt(u32, color[0] * 255.0);
const g = @floatToInt(u32, color[1] * 255.0) << 8;
const b = @floatToInt(u32, color[2] * 255.0) << 16;
const rgb = r | g | b;
debug.lines.append(.{ .position = .{ p0[0], p0[1], p0[2] }, .color = rgb }) catch unreachable;
debug.lines.append(.{ .position = .{ p1[0], p1[1], p1[2] }, .color = rgb }) catch unreachable;
}
fn drawLine2(
context: ?*anyopaque,
p0: [*c]const f32,
p1: [*c]const f32,
color0: [*c]const f32,
color1: [*c]const f32,
) callconv(.C) void {
const debug = @ptrCast(*PhysicsDebug, @alignCast(@alignOf(PhysicsDebug), context.?));
const r0 = @floatToInt(u32, color0[0] * 255.0);
const g0 = @floatToInt(u32, color0[1] * 255.0) << 8;
const b0 = @floatToInt(u32, color0[2] * 255.0) << 16;
const rgb0 = r0 | g0 | b0;
const r1 = @floatToInt(u32, color1[0] * 255.0);
const g1 = @floatToInt(u32, color1[1] * 255.0) << 8;
const b1 = @floatToInt(u32, color1[2] * 255.0) << 16;
const rgb1 = r1 | g1 | b1;
debug.lines.append(.{ .position = .{ p0[0], p0[1], p0[2] }, .color = rgb0 }) catch unreachable;
debug.lines.append(.{ .position = .{ p1[0], p1[1], p1[2] }, .color = rgb1 }) catch unreachable;
}
};
var shape_sphere_r1: zb.CbtShapeHandle = undefined;
var shape_box_e111: zb.CbtShapeHandle = undefined;
var shape_world: zb.CbtShapeHandle = undefined;
fn createScene1(
world: zb.CbtWorldHandle,
physics_objects_pool: PhysicsObjectsPool,
entities: *std.ArrayList(Entity),
camera: *Camera,
) void {
const world_body = physics_objects_pool.getBody();
zb.cbtBodyCreate(world_body, 0.0, &Mat4.initTranslation(Vec3.init(0, 0, 0)).toArray4x3(), shape_world);
zb.cbtBodySetFriction(world_body, default_world_friction);
createAddEntity(world, world_body, Vec4.init(0.25, 0.25, 0.25, 0.125), entities);
//
// Create shapes
//
const sphere_shape = zb.cbtShapeAllocate(zb.CBT_SHAPE_TYPE_SPHERE);
zb.cbtShapeSphereCreate(sphere_shape, 1.5);
const box_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_BOX);
zb.cbtShapeBoxCreate(box_shape, &Vec3.init(0.5, 1.0, 2.0).c);
const capsule_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_CAPSULE);
zb.cbtShapeCapsuleCreate(capsule_shape, 1.0, 2.0, zb.CBT_LINEAR_AXIS_Y);
const cylinder_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_CYLINDER);
zb.cbtShapeCylinderCreate(cylinder_shape, &Vec3.init(1.5, 2.0, 1.5).c, zb.CBT_LINEAR_AXIS_Y);
const thin_cylinder_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_CYLINDER);
zb.cbtShapeCylinderCreate(thin_cylinder_shape, &Vec3.init(0.3, 1.1, 0.3).c, zb.CBT_LINEAR_AXIS_Y);
const cone_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_CONE);
zb.cbtShapeConeCreate(cone_shape, 1.0, 2.0, zb.CBT_LINEAR_AXIS_Y);
const compound_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_COMPOUND);
zb.cbtShapeCompoundCreate(compound_shape, true, 3);
zb.cbtShapeCompoundAddChild(
compound_shape,
&Mat4.initTranslation(Vec3.init(0, 0, 0)).toArray4x3(),
thin_cylinder_shape,
);
zb.cbtShapeCompoundAddChild(
compound_shape,
&Mat4.initTranslation(Vec3.init(0, 2, 0)).toArray4x3(),
shape_sphere_r1,
);
zb.cbtShapeCompoundAddChild(
compound_shape,
&Mat4.initTranslation(Vec3.init(0, -2, 0.0)).toArray4x3(),
shape_box_e111,
);
//
// Create bodies and entities
//
const body0 = physics_objects_pool.getBody();
zb.cbtBodyCreate(body0, 15.0, &Mat4.initTranslation(Vec3.init(3, 3.5, 5)).toArray4x3(), shape_box_e111);
createAddEntity(world, body0, Vec4.init(0.75, 0.0, 0.0, 0.5), entities);
const body1 = physics_objects_pool.getBody();
zb.cbtBodyCreate(body1, 50.0, &Mat4.initTranslation(Vec3.init(-3, 3.5, 5)).toArray4x3(), box_shape);
createAddEntity(world, body1, Vec4.init(1.0, 0.9, 0.0, 0.75), entities);
const body2 = physics_objects_pool.getBody();
zb.cbtBodyCreate(body2, 25.0, &Mat4.initTranslation(Vec3.init(-3, 3.5, 10)).toArray4x3(), sphere_shape);
createAddEntity(world, body2, Vec4.init(0.0, 0.1, 1.0, 0.25), entities);
const body3 = physics_objects_pool.getBody();
zb.cbtBodyCreate(body3, 30.0, &Mat4.initTranslation(Vec3.init(-5, 3.5, 10)).toArray4x3(), capsule_shape);
createAddEntity(world, body3, Vec4.init(0.0, 1.0, 0.0, 0.25), entities);
const body4 = physics_objects_pool.getBody();
zb.cbtBodyCreate(body4, 60.0, &Mat4.initTranslation(Vec3.init(5, 3.5, 10)).toArray4x3(), cylinder_shape);
createAddEntity(world, body4, Vec4.init(1.0, 1.0, 1.0, 0.75), entities);
const body5 = physics_objects_pool.getBody();
zb.cbtBodyCreate(body5, 15.0, &Mat4.initTranslation(Vec3.init(0, 3.5, 7)).toArray4x3(), cone_shape);
createAddEntity(world, body5, Vec4.init(1.0, 0.5, 0.0, 0.8), entities);
const body6 = physics_objects_pool.getBody();
zb.cbtBodyCreate(body6, 50.0, &Mat4.initTranslation(Vec3.init(0, 5, 12)).toArray4x3(), compound_shape);
createAddEntity(world, body6, Vec4.init(1.0, 0.0, 0.0, 0.1), entities);
camera.* = .{
.position = Vec3.init(0.0, 3.0, 0.0),
.forward = Vec3.initZero(),
.pitch = math.pi * 0.05,
.yaw = 0.0,
};
}
fn createScene2(
world: zb.CbtWorldHandle,
physics_objects_pool: PhysicsObjectsPool,
entities: *std.ArrayList(Entity),
camera: *Camera,
) void {
const world_body = physics_objects_pool.getBody();
zb.cbtBodyCreate(world_body, 0.0, &Mat4.initTranslation(Vec3.init(0, 0, 0)).toArray4x3(), shape_world);
zb.cbtBodySetFriction(world_body, default_world_friction);
createAddEntity(world, world_body, Vec4.init(0.25, 0.25, 0.25, 0.125), entities);
var level: u32 = 0;
var y: f32 = 2.0;
while (y <= 14.0) : (y += 2.0) {
const bound: f32 = 16.0 - y;
var z: f32 = -bound;
const base_color_roughness = if (level % 2 == 1)
Vec4.init(0.5, 0.0, 0.0, 0.5)
else
Vec4.init(0.7, 0.6, 0.0, 0.75);
level += 1;
while (z <= bound) : (z += 2.0) {
var x: f32 = -bound;
while (x <= bound) : (x += 2.0) {
const body = physics_objects_pool.getBody();
zb.cbtBodyCreate(body, 1.0, &Mat4.initTranslation(Vec3.init(x, y, z)).toArray4x3(), shape_box_e111);
createAddEntity(world, body, base_color_roughness, entities);
}
}
}
camera.* = .{
.position = Vec3.init(30.0, 30.0, -30.0),
.forward = Vec3.initZero(),
.pitch = math.pi * 0.2,
.yaw = -math.pi * 0.25,
};
}
fn createScene3(
world: zb.CbtWorldHandle,
physics_objects_pool: PhysicsObjectsPool,
entities: *std.ArrayList(Entity),
camera: *Camera,
) void {
const world_body = physics_objects_pool.getBody();
zb.cbtBodyCreate(world_body, 0.0, &Mat4.initTranslation(Vec3.init(0, 0, 0)).toArray4x3(), shape_world);
zb.cbtBodySetFriction(world_body, default_world_friction);
createAddEntity(world, world_body, Vec4.init(0.25, 0.25, 0.25, 0.125), entities);
// Chain of boxes
var x: f32 = -14.0;
var prev_body: zb.CbtBodyHandle = null;
while (x <= 14.0) : (x += 4.0) {
const body = physics_objects_pool.getBody();
zb.cbtBodyCreate(body, 10.0, &Mat4.initTranslation(Vec3.init(x, 3.5, 5)).toArray4x3(), shape_box_e111);
createAddEntity(world, body, Vec4.init(0.75, 0.0, 0.0, 0.5), entities);
if (prev_body != null) {
const p2p = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_POINT2POINT);
zb.cbtConPoint2PointCreate2(p2p, prev_body, body, &Vec3.init(1.25, 0, 0).c, &Vec3.init(-1.25, 0, 0).c);
zb.cbtConPoint2PointSetTau(p2p, 0.001);
zb.cbtWorldAddConstraint(world, p2p, false);
}
prev_body = body;
}
// Chain of spheres
x = -14.0;
prev_body = null;
while (x <= 14.0) : (x += 4.0) {
const body = physics_objects_pool.getBody();
zb.cbtBodyCreate(body, 10.0, &Mat4.initTranslation(Vec3.init(x, 3.5, 10)).toArray4x3(), shape_sphere_r1);
createAddEntity(world, body, Vec4.init(0.0, 0.75, 0.0, 0.5), entities);
if (prev_body != null) {
const p2p = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_POINT2POINT);
zb.cbtConPoint2PointCreate2(p2p, prev_body, body, &Vec3.init(1.1, 0, 0).c, &Vec3.init(-1.1, 0, 0).c);
zb.cbtConPoint2PointSetTau(p2p, 0.001);
zb.cbtWorldAddConstraint(world, p2p, false);
}
prev_body = body;
}
// Fixed chain of spheres
var y: f32 = 16.0;
prev_body = null;
const static_body = physics_objects_pool.getBody();
zb.cbtBodyCreate(static_body, 0.0, &Mat4.initTranslation(Vec3.init(10, y, 10)).toArray4x3(), shape_box_e111);
createAddEntity(world, static_body, Vec4.init(0.75, 0.75, 0.0, 0.5), entities);
while (y >= 1.0) : (y -= 4.0) {
const body = physics_objects_pool.getBody();
zb.cbtBodyCreate(body, 10.0, &Mat4.initTranslation(Vec3.init(10, y, 10)).toArray4x3(), shape_sphere_r1);
createAddEntity(world, body, Vec4.init(0.0, 0.25, 1.0, 0.25), entities);
if (prev_body != null) {
const p2p = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_POINT2POINT);
zb.cbtConPoint2PointCreate2(p2p, body, prev_body, &Vec3.init(0, 1.25, 0).c, &Vec3.init(0, -1.25, 0).c);
zb.cbtConPoint2PointSetTau(p2p, 0.001);
zb.cbtWorldAddConstraint(world, p2p, false);
} else {
const p2p = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_POINT2POINT);
zb.cbtConPoint2PointCreate2(p2p, body, static_body, &Vec3.init(0, 1.25, 0).c, &Vec3.init(0, -1.25, 0).c);
zb.cbtConPoint2PointSetTau(p2p, 0.001);
zb.cbtWorldAddConstraint(world, p2p, false);
}
prev_body = body;
}
camera.* = .{
.position = Vec3.init(0.0, 7.0, -5.0),
.forward = Vec3.initZero(),
.pitch = math.pi * 0.125,
.yaw = 0.0,
};
}
fn createScene4(
world: zb.CbtWorldHandle,
physics_objects_pool: PhysicsObjectsPool,
entities: *std.ArrayList(Entity),
camera: *Camera,
connected_bodies: *std.ArrayList(BodyWithPivot),
motors: *std.ArrayList(zb.CbtConstraintHandle),
) void {
const world_body = physics_objects_pool.getBody();
zb.cbtBodyCreate(world_body, 0.0, &Mat4.initTranslation(Vec3.init(0, 0, 0)).toArray4x3(), shape_world);
zb.cbtBodySetFriction(world_body, default_world_friction);
createAddEntity(world, world_body, Vec4.init(0.25, 0.25, 0.25, 0.125), entities);
{
const support_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_CYLINDER);
zb.cbtShapeCylinderCreate(support_shape, &Vec3.init(0.7, 3.5, 0.7).c, zb.CBT_LINEAR_AXIS_Y);
const support_body = physics_objects_pool.getBody();
zb.cbtBodyCreate(
support_body,
0.0,
&Mat4.initRotationX(math.pi * 0.5).mul(Mat4.initTranslation(Vec3.init(1, 17.7, 12))).toArray4x3(),
support_shape,
);
createAddEntity(world, support_body, Vec4.init(0.1, 0.1, 0.1, 0.5), entities);
const box_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_BOX);
zb.cbtShapeBoxCreate(box_shape, &Vec3.init(0.2, 2.0, 3.0).c);
const body0 = physics_objects_pool.getBody();
zb.cbtBodyCreate(body0, 50.0, &Mat4.initTranslation(Vec3.init(1.0, 15.0, 12)).toArray4x3(), box_shape);
createAddEntity(world, body0, Vec4.init(1.0, 0.0, 0.0, 0.7), entities);
const body1 = physics_objects_pool.getBody();
zb.cbtBodyCreate(body1, 50.0, &Mat4.initTranslation(Vec3.init(1.0, 11.0, 12)).toArray4x3(), box_shape);
createAddEntity(world, body1, Vec4.init(0.0, 1.0, 0.0, 0.7), entities);
const body2 = physics_objects_pool.getBody();
zb.cbtBodyCreate(body2, 50.0, &Mat4.initTranslation(Vec3.init(1.0, 7.0, 12)).toArray4x3(), box_shape);
zb.cbtBodyApplyCentralImpulse(body2, &zb.CbtVector3{ 1000, 0, 0 });
createAddEntity(world, body2, Vec4.init(0.0, 0.2, 1.0, 0.7), entities);
const hinge0 = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_HINGE);
zb.cbtConHingeCreate1(hinge0, body0, &Vec3.init(0, 2.8, 0).c, &Vec3.init(0, 0, 1).c, false);
zb.cbtWorldAddConstraint(world, hinge0, true);
const hinge1 = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_HINGE);
zb.cbtConHingeCreate2(
hinge1,
body0,
body1,
&Vec3.init(0, -2.1, 0).c,
&Vec3.init(0, 2.1, 0).c,
&Vec3.init(0, 0, 1).c,
&Vec3.init(0, 0, 1).c,
false,
);
zb.cbtConHingeSetLimit(hinge1, -math.pi * 0.5, math.pi * 0.5, 0.9, 0.3, 1.0);
zb.cbtWorldAddConstraint(world, hinge1, true);
const hinge2 = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_HINGE);
zb.cbtConHingeCreate2(
hinge2,
body1,
body2,
&Vec3.init(0, -2.1, 0).c,
&Vec3.init(0, 2.1, 0).c,
&Vec3.init(0, 0, 1).c,
&Vec3.init(0, 0, 1).c,
false,
);
zb.cbtConHingeSetLimit(hinge2, -math.pi * 0.5, math.pi * 0.5, 0.9, 0.3, 1.0);
zb.cbtWorldAddConstraint(world, hinge2, true);
}
{
const support_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_CYLINDER);
zb.cbtShapeCylinderCreate(support_shape, &Vec3.init(0.7, 0.7, 0.7).c, zb.CBT_LINEAR_AXIS_Y);
var i: u32 = 0;
while (i < 3) : (i += 1) {
const x = -3 + @intToFloat(f32, i) * 2.025;
const body = physics_objects_pool.getBody();
zb.cbtBodyCreate(
body,
100.0,
&Mat4.initTranslation(Vec3.init(x, 5, 5)).toArray4x3(),
shape_sphere_r1,
);
zb.cbtBodySetRestitution(body, 1.0);
zb.cbtBodySetFriction(body, 0.0);
zb.cbtBodySetDamping(body, 0.1, 0.1);
createAddEntity(world, body, Vec4.init(1.0, 0.0, 0.0, 0.25), entities);
const ref = Mat4.initRotationY(math.pi * 0.5).mul(Mat4.initTranslation(Vec3.init(0, 12, 0)));
const slider = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_SLIDER);
zb.cbtConSliderCreate1(slider, body, &ref.toArray4x3(), true);
zb.cbtConSliderSetLinearLowerLimit(slider, 0.0);
zb.cbtConSliderSetLinearUpperLimit(slider, 0.0);
zb.cbtConSliderSetAngularLowerLimit(slider, -math.pi * 0.5);
zb.cbtConSliderSetAngularUpperLimit(slider, math.pi * 0.5);
zb.cbtWorldAddConstraint(world, slider, true);
const support_body = physics_objects_pool.getBody();
zb.cbtBodyCreate(
support_body,
0.0,
&Mat4.initRotationX(math.pi * 0.5).mul(Mat4.initTranslation(Vec3.init(x, 17, 5))).toArray4x3(),
support_shape,
);
createAddEntity(world, support_body, Vec4.init(0.1, 0.1, 0.1, 0.5), entities);
connected_bodies.append(.{ .body = body, .pivot = Vec3.init(0, 1, 0) }) catch unreachable;
connected_bodies.append(.{ .body = support_body, .pivot = Vec3.initZero() }) catch unreachable;
if (i == 2) {
zb.cbtBodyApplyCentralImpulse(body, &zb.CbtVector3{ 300, 0, 0 });
}
}
}
{
const support_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_BOX);
zb.cbtShapeBoxCreate(support_shape, &Vec3.init(0.3, 5.0, 0.3).c);
const support_body0 = physics_objects_pool.getBody();
zb.cbtBodyCreate(
support_body0,
0.0,
&Mat4.initTranslation(Vec3.init(10, 5.0, 7)).toArray4x3(),
support_shape,
);
createAddEntity(world, support_body0, Vec4.init(0.1, 0.1, 0.1, 0.5), entities);
const support_body1 = physics_objects_pool.getBody();
zb.cbtBodyCreate(
support_body1,
0.0,
&Mat4.initTranslation(Vec3.init(20, 5.0, 7)).toArray4x3(),
support_shape,
);
createAddEntity(world, support_body1, Vec4.init(0.1, 0.1, 0.1, 0.5), entities);
connected_bodies.append(.{ .body = support_body0, .pivot = Vec3.init(0, 4, 0) }) catch unreachable;
connected_bodies.append(.{ .body = support_body1, .pivot = Vec3.init(0, 4, 0) }) catch unreachable;
connected_bodies.append(.{ .body = support_body0, .pivot = Vec3.init(0, 1, 0) }) catch unreachable;
connected_bodies.append(.{ .body = support_body1, .pivot = Vec3.init(0, 1, 0) }) catch unreachable;
connected_bodies.append(.{ .body = support_body0, .pivot = Vec3.init(0, -2, 0) }) catch unreachable;
connected_bodies.append(.{ .body = support_body1, .pivot = Vec3.init(0, -2, 0) }) catch unreachable;
const body0 = physics_objects_pool.getBody();
zb.cbtBodyCreate(
body0,
50.0,
&Mat4.initTranslation(Vec3.init(15, 9.0, 7)).toArray4x3(),
shape_box_e111,
);
createAddEntity(world, body0, Vec4.init(0.0, 0.2, 1.0, 0.7), entities);
const slider0 = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_SLIDER);
zb.cbtConSliderCreate1(slider0, body0, &Mat4.initIdentity().toArray4x3(), true);
zb.cbtConSliderSetLinearLowerLimit(slider0, -4.0);
zb.cbtConSliderSetLinearUpperLimit(slider0, 4.0);
zb.cbtConSliderSetAngularLowerLimit(slider0, math.pi);
zb.cbtConSliderSetAngularUpperLimit(slider0, -math.pi);
zb.cbtWorldAddConstraint(world, slider0, true);
const body1 = physics_objects_pool.getBody();
zb.cbtBodyCreate(
body1,
50.0,
&Mat4.initTranslation(Vec3.init(15, 6, 7)).toArray4x3(),
shape_box_e111,
);
createAddEntity(world, body1, Vec4.init(0.0, 1.0, 0.0, 0.7), entities);
const slider1 = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_SLIDER);
zb.cbtConSliderCreate1(slider1, body1, &Mat4.initIdentity().toArray4x3(), true);
zb.cbtConSliderSetLinearLowerLimit(slider1, -4.0);
zb.cbtConSliderSetLinearUpperLimit(slider1, 4.0);
zb.cbtWorldAddConstraint(world, slider1, true);
const body2 = physics_objects_pool.getBody();
zb.cbtBodyCreate(
body2,
50.0,
&Mat4.initTranslation(Vec3.init(15, 3, 7)).toArray4x3(),
shape_box_e111,
);
createAddEntity(world, body2, Vec4.init(1.0, 0.0, 0.0, 0.7), entities);
const slider2 = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_SLIDER);
zb.cbtConSliderCreate1(slider2, body2, &Mat4.initIdentity().toArray4x3(), true);
zb.cbtConSliderSetLinearLowerLimit(slider2, -4.0);
zb.cbtConSliderSetLinearUpperLimit(slider2, 4.0);
zb.cbtConSliderSetAngularLowerLimit(slider2, math.pi);
zb.cbtConSliderSetAngularUpperLimit(slider2, -math.pi);
zb.cbtConSliderEnableAngularMotor(slider2, true, 2.0, 10.0);
zb.cbtWorldAddConstraint(world, slider2, true);
motors.append(slider2) catch unreachable;
}
{
const gear00_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_CYLINDER);
zb.cbtShapeCylinderCreate(gear00_shape, &Vec3.init(1.5, 0.3, 1.5).c, zb.CBT_LINEAR_AXIS_Y);
const gear01_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_CYLINDER);
zb.cbtShapeCylinderCreate(gear01_shape, &Vec3.init(1.65, 0.15, 1.65).c, zb.CBT_LINEAR_AXIS_Y);
const gear0_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_COMPOUND);
zb.cbtShapeCompoundCreate(gear0_shape, true, 2);
zb.cbtShapeCompoundAddChild(
gear0_shape,
&Mat4.initTranslation(Vec3.init(0, 0, 0)).toArray4x3(),
gear00_shape,
);
zb.cbtShapeCompoundAddChild(
gear0_shape,
&Mat4.initTranslation(Vec3.init(0, 0, 0)).toArray4x3(),
gear01_shape,
);
const gear1_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_CYLINDER);
zb.cbtShapeCylinderCreate(gear1_shape, &Vec3.init(1.5, 0.3, 1.5).c, zb.CBT_LINEAR_AXIS_Y);
const gear0_body = physics_objects_pool.getBody();
zb.cbtBodyCreate(
gear0_body,
1.0,
&Mat4.initRotationX(math.pi * 0.5).mul(Mat4.initTranslation(Vec3.init(-15.0, 5, 7))).toArray4x3(),
gear0_shape,
);
zb.cbtBodySetLinearFactor(gear0_body, &Vec3.init(0, 0, 0).c);
zb.cbtBodySetAngularFactor(gear0_body, &Vec3.init(0, 0, 1).c);
createAddEntity(world, gear0_body, Vec4.init(1.0, 0.0, 0.0, 0.7), entities);
const slider = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_SLIDER);
zb.cbtConSliderCreate1(slider, gear0_body, &Mat4.initRotationZ(math.pi * 0.5).toArray4x3(), true);
zb.cbtConSliderSetLinearLowerLimit(slider, 0.0);
zb.cbtConSliderSetLinearUpperLimit(slider, 0.0);
zb.cbtConSliderSetAngularLowerLimit(slider, math.pi);
zb.cbtConSliderSetAngularUpperLimit(slider, -math.pi);
zb.cbtConSliderEnableAngularMotor(slider, true, 3.2, 40.0);
zb.cbtWorldAddConstraint(world, slider, true);
motors.append(slider) catch unreachable;
const gear1_body = physics_objects_pool.getBody();
zb.cbtBodyCreate(
gear1_body,
2.0,
&Mat4.initRotationX(math.pi * 0.5).mul(Mat4.initTranslation(Vec3.init(-10.0, 5, 7))).toArray4x3(),
gear1_shape,
);
zb.cbtBodySetLinearFactor(gear1_body, &Vec3.init(0, 0, 0).c);
zb.cbtBodySetAngularFactor(gear1_body, &Vec3.init(0, 0, 1).c);
createAddEntity(world, gear1_body, Vec4.init(0.0, 1.0, 0.0, 0.7), entities);
const connection_shape = physics_objects_pool.getShape(zb.CBT_SHAPE_TYPE_BOX);
zb.cbtShapeBoxCreate(connection_shape, &Vec3.init(2.5, 0.2, 0.1).c);
const connection_body = physics_objects_pool.getBody();
zb.cbtBodyCreate(
connection_body,
1.0,
&Mat4.initTranslation(Vec3.init(-12.5, 6, 6)).toArray4x3(),
connection_shape,
);
createAddEntity(world, connection_body, Vec4.init(0.0, 0.0, 0.0, 0.5), entities);
{
const p2p = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_POINT2POINT);
zb.cbtConPoint2PointCreate2(
p2p,
gear0_body,
connection_body,
&zb.CbtVector3{ 0.0, -0.4, 1.0 },
&zb.CbtVector3{ -2.5, 0, 0 },
);
zb.cbtWorldAddConstraint(world, p2p, true);
}
{
const p2p = physics_objects_pool.getConstraint(zb.CBT_CONSTRAINT_TYPE_POINT2POINT);
zb.cbtConPoint2PointCreate2(
p2p,
gear1_body,
connection_body,
&zb.CbtVector3{ 0.0, -0.4, -1.0 },
&zb.CbtVector3{ 2.5, 0, 0 },
);
zb.cbtWorldAddConstraint(world, p2p, true);
}
}
camera.* = .{
.position = Vec3.init(0.0, 7.0, -7.0),
.forward = Vec3.initZero(),
.pitch = 0.0,
.yaw = 0.0,
};
}
fn init(allocator: std.mem.Allocator) !DemoState {
const window = try common.initWindow(allocator, window_name, window_width, window_height);
var arena_allocator_state = std.heap.ArenaAllocator.init(allocator);
defer arena_allocator_state.deinit();
const arena_allocator = arena_allocator_state.allocator();
var gctx = zd3d12.GraphicsContext.init(allocator, window);
gctx.present_flags = 0;
gctx.present_interval = 1;
const barycentrics_supported = blk: {
var options3: d3d12.FEATURE_DATA_D3D12_OPTIONS3 = undefined;
const res = gctx.device.CheckFeatureSupport(.OPTIONS3, &options3, @sizeOf(d3d12.FEATURE_DATA_D3D12_OPTIONS3));
break :blk options3.BarycentricsSupported == w32.TRUE and res == w32.S_OK;
};
const physics_debug_pso = blk: {
var pso_desc = d3d12.GRAPHICS_PIPELINE_STATE_DESC.initDefault();
pso_desc.RTVFormats[0] = .R8G8B8A8_UNORM;
pso_desc.NumRenderTargets = 1;
pso_desc.BlendState.RenderTarget[0].RenderTargetWriteMask = 0xf;
pso_desc.PrimitiveTopologyType = .LINE;
pso_desc.DSVFormat = .D32_FLOAT;
pso_desc.SampleDesc = .{ .Count = num_msaa_samples, .Quality = 0 };
break :blk gctx.createGraphicsShaderPipeline(
arena_allocator,
&pso_desc,
content_dir ++ "shaders/physics_debug.vs.cso",
content_dir ++ "shaders/physics_debug.ps.cso",
);
};
const simple_entity_pso = blk: {
const input_layout_desc = [_]d3d12.INPUT_ELEMENT_DESC{
d3d12.INPUT_ELEMENT_DESC.init("POSITION", 0, .R32G32B32_FLOAT, 0, 0, .PER_VERTEX_DATA, 0),
d3d12.INPUT_ELEMENT_DESC.init("_Normal", 0, .R32G32B32_FLOAT, 0, 12, .PER_VERTEX_DATA, 0),
};
var pso_desc = d3d12.GRAPHICS_PIPELINE_STATE_DESC.initDefault();
pso_desc.InputLayout = .{
.pInputElementDescs = &input_layout_desc,
.NumElements = input_layout_desc.len,
};
pso_desc.RTVFormats[0] = .R8G8B8A8_UNORM;
pso_desc.NumRenderTargets = 1;
pso_desc.BlendState.RenderTarget[0].RenderTargetWriteMask = 0xf;
pso_desc.PrimitiveTopologyType = .TRIANGLE;
pso_desc.DSVFormat = .D32_FLOAT;
pso_desc.SampleDesc = .{ .Count = num_msaa_samples, .Quality = 0 };
if (!barycentrics_supported) {
break :blk gctx.createGraphicsShaderPipelineVsGsPs(
arena_allocator,
&pso_desc,
content_dir ++ "shaders/simple_entity.vs.cso",
content_dir ++ "shaders/simple_entity.gs.cso",
content_dir ++ "shaders/simple_entity_with_gs.ps.cso",
);
} else {
break :blk gctx.createGraphicsShaderPipeline(
arena_allocator,
&pso_desc,
content_dir ++ "shaders/simple_entity.vs.cso",
content_dir ++ "shaders/simple_entity.ps.cso",
);
}
};
const color_texture = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&blk: {
var desc = d3d12.RESOURCE_DESC.initTex2d(.R8G8B8A8_UNORM, gctx.viewport_width, gctx.viewport_height, 1);
desc.Flags = d3d12.RESOURCE_FLAG_ALLOW_RENDER_TARGET;
desc.SampleDesc.Count = num_msaa_samples;
break :blk desc;
},
d3d12.RESOURCE_STATE_RENDER_TARGET,
&d3d12.CLEAR_VALUE.initColor(.R8G8B8A8_UNORM, &.{ 0.0, 0.0, 0.0, 1.0 }),
) catch |err| hrPanic(err);
const color_texture_rtv = gctx.allocateCpuDescriptors(.RTV, 1);
gctx.device.CreateRenderTargetView(gctx.lookupResource(color_texture).?, null, color_texture_rtv);
const depth_texture = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&blk: {
var desc = d3d12.RESOURCE_DESC.initTex2d(.D32_FLOAT, gctx.viewport_width, gctx.viewport_height, 1);
desc.Flags = d3d12.RESOURCE_FLAG_ALLOW_DEPTH_STENCIL | d3d12.RESOURCE_FLAG_DENY_SHADER_RESOURCE;
desc.SampleDesc.Count = num_msaa_samples;
break :blk desc;
},
d3d12.RESOURCE_STATE_DEPTH_WRITE,
&d3d12.CLEAR_VALUE.initDepthStencil(.D32_FLOAT, 1.0, 0),
) catch |err| hrPanic(err);
const depth_texture_dsv = gctx.allocateCpuDescriptors(.DSV, 1);
gctx.device.CreateDepthStencilView(gctx.lookupResource(depth_texture).?, null, depth_texture_dsv);
zmesh.init(arena_allocator);
defer zmesh.deinit();
var all_meshes = std.ArrayList(Mesh).init(allocator);
var all_positions = std.ArrayList([3]f32).init(arena_allocator);
var all_normals = std.ArrayList([3]f32).init(arena_allocator);
var all_indices = std.ArrayList(u32).init(arena_allocator);
loadAllMeshes(&all_meshes, &all_positions, &all_normals, &all_indices);
const physics_world = zb.cbtWorldCreate();
zb.cbtWorldSetGravity(physics_world, &Vec3.init(0.0, -10.0, 0.0).c);
var physics_debug = allocator.create(PhysicsDebug) catch unreachable;
physics_debug.* = PhysicsDebug.init(allocator);
zb.cbtWorldDebugSetDrawer(physics_world, &.{
.drawLine1 = PhysicsDebug.drawLine1,
.drawLine2 = PhysicsDebug.drawLine2,
.drawContactPoint = null,
.context = physics_debug,
});
// Create common shapes.
{
shape_world = zb.cbtShapeAllocate(zb.CBT_SHAPE_TYPE_TRIANGLE_MESH);
zb.cbtShapeTriMeshCreateBegin(shape_world);
zb.cbtShapeTriMeshAddIndexVertexArray(
shape_world,
@intCast(i32, all_meshes.items[mesh_world].num_indices / 3),
&all_indices.items[all_meshes.items[mesh_world].index_offset],
3 * @sizeOf(u32),
@intCast(i32, all_meshes.items[mesh_world].num_vertices),
&all_positions.items[all_meshes.items[mesh_world].vertex_offset],
3 * @sizeOf(f32),
);
zb.cbtShapeTriMeshCreateEnd(shape_world);
shape_sphere_r1 = zb.cbtShapeAllocate(zb.CBT_SHAPE_TYPE_SPHERE);
zb.cbtShapeSphereCreate(shape_sphere_r1, 1.0);
shape_box_e111 = zb.cbtShapeAllocate(zb.CBT_SHAPE_TYPE_BOX);
zb.cbtShapeBoxCreate(shape_box_e111, &Vec3.init(1.0, 1.0, 1.0).c);
}
const physics_objects_pool = PhysicsObjectsPool.init();
var camera: Camera = undefined;
var entities = std.ArrayList(Entity).init(allocator);
createScene1(physics_world, physics_objects_pool, &entities, &camera);
entities.items[0].flags = 1;
var connected_bodies = std.ArrayList(BodyWithPivot).init(allocator);
var motors = std.ArrayList(zb.CbtConstraintHandle).init(allocator);
var vertex_buffer = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&d3d12.RESOURCE_DESC.initBuffer(all_positions.items.len * @sizeOf(Vertex)),
d3d12.RESOURCE_STATE_COPY_DEST,
null,
) catch |err| hrPanic(err);
var index_buffer = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&d3d12.RESOURCE_DESC.initBuffer(all_indices.items.len * @sizeOf(u32)),
d3d12.RESOURCE_STATE_COPY_DEST,
null,
) catch |err| hrPanic(err);
//
// Begin frame to init/upload resources to the GPU.
//
gctx.beginFrame();
gctx.endFrame();
gctx.beginFrame();
var guir = GuiRenderer.init(arena_allocator, &gctx, num_msaa_samples, content_dir);
{
const upload = gctx.allocateUploadBufferRegion(Vertex, @intCast(u32, all_positions.items.len));
for (all_positions.items) |_, i| {
upload.cpu_slice[i].position = all_positions.items[i];
upload.cpu_slice[i].normal = all_normals.items[i];
}
gctx.cmdlist.CopyBufferRegion(
gctx.lookupResource(vertex_buffer).?,
0,
upload.buffer,
upload.buffer_offset,
upload.cpu_slice.len * @sizeOf(@TypeOf(upload.cpu_slice[0])),
);
gctx.addTransitionBarrier(vertex_buffer, d3d12.RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER);
}
{
const upload = gctx.allocateUploadBufferRegion(u32, @intCast(u32, all_indices.items.len));
for (all_indices.items) |_, i| {
upload.cpu_slice[i] = all_indices.items[i];
}
gctx.cmdlist.CopyBufferRegion(
gctx.lookupResource(index_buffer).?,
0,
upload.buffer,
upload.buffer_offset,
upload.cpu_slice.len * @sizeOf(@TypeOf(upload.cpu_slice[0])),
);
gctx.addTransitionBarrier(index_buffer, d3d12.RESOURCE_STATE_INDEX_BUFFER);
}
gctx.endFrame();
gctx.finishGpuCommands();
return DemoState{
.gctx = gctx,
.guir = guir,
.frame_stats = common.FrameStats.init(),
.physics_world = physics_world,
.physics_debug = physics_debug,
.physics_objects_pool = physics_objects_pool,
.entities = entities,
.connected_bodies = connected_bodies,
.motors = motors,
.physics_debug_pso = physics_debug_pso,
.simple_entity_pso = simple_entity_pso,
.vertex_buffer = vertex_buffer,
.index_buffer = index_buffer,
.meshes = all_meshes,
.color_texture = color_texture,
.depth_texture = depth_texture,
.color_texture_rtv = color_texture_rtv,
.depth_texture_dsv = depth_texture_dsv,
.camera = camera,
.mouse = .{
.cursor_prev_x = 0,
.cursor_prev_y = 0,
},
.pick = .{
.body = null,
.saved_linear_damping = 0.0,
.saved_angular_damping = 0.0,
.constraint = zb.cbtConAllocate(zb.CBT_CONSTRAINT_TYPE_POINT2POINT),
.distance = 0.0,
},
.current_scene_index = 0,
.selected_entity_index = 0,
.keyboard_delay = 0.0,
.simulation_is_paused = false,
.do_simulation_step = false,
};
}
fn deinit(demo: *DemoState, allocator: std.mem.Allocator) void {
demo.gctx.finishGpuCommands();
demo.meshes.deinit();
demo.guir.deinit(&demo.gctx);
demo.gctx.deinit(allocator);
common.deinitWindow(allocator);
if (zb.cbtConIsCreated(demo.pick.constraint)) {
zb.cbtWorldRemoveConstraint(demo.physics_world, demo.pick.constraint);
zb.cbtConDestroy(demo.pick.constraint);
}
zb.cbtConDeallocate(demo.pick.constraint);
demo.entities.deinit();
demo.connected_bodies.deinit();
demo.motors.deinit();
demo.physics_objects_pool.deinit(demo.physics_world);
demo.physics_debug.deinit();
allocator.destroy(demo.physics_debug);
zb.cbtWorldDestroy(demo.physics_world);
demo.* = undefined;
}
fn createAddEntity(
world: zb.CbtWorldHandle,
body: zb.CbtBodyHandle,
base_color_roughness: Vec4,
entities: *std.ArrayList(Entity),
) void {
const shape = zb.cbtBodyGetShape(body);
const shape_type = zb.cbtShapeGetType(shape);
const mesh_index = switch (shape_type) {
zb.CBT_SHAPE_TYPE_BOX => mesh_cube,
zb.CBT_SHAPE_TYPE_SPHERE => mesh_sphere,
zb.CBT_SHAPE_TYPE_CONE => mesh_cone,
zb.CBT_SHAPE_TYPE_CYLINDER => mesh_cylinder,
zb.CBT_SHAPE_TYPE_CAPSULE => mesh_capsule,
zb.CBT_SHAPE_TYPE_TRIANGLE_MESH => mesh_world,
zb.CBT_SHAPE_TYPE_COMPOUND => mesh_compound,
else => blk: {
assert(false);
break :blk 0;
},
};
const mesh_size = switch (shape_type) {
zb.CBT_SHAPE_TYPE_BOX => blk: {
var half_extents: Vec3 = undefined;
zb.cbtShapeBoxGetHalfExtentsWithoutMargin(shape, &half_extents.c);
break :blk half_extents;
},
zb.CBT_SHAPE_TYPE_SPHERE => blk: {
break :blk Vec3.initS(zb.cbtShapeSphereGetRadius(shape));
},
zb.CBT_SHAPE_TYPE_CONE => blk: {
assert(zb.cbtShapeConeGetUpAxis(shape) == zb.CBT_LINEAR_AXIS_Y);
const radius = zb.cbtShapeConeGetRadius(shape);
const height = zb.cbtShapeConeGetHeight(shape);
assert(radius == 1.0 and height == 2.0);
break :blk Vec3.init(radius, 0.5 * height, radius);
},
zb.CBT_SHAPE_TYPE_CYLINDER => blk: {
var half_extents: Vec3 = undefined;
assert(zb.cbtShapeCylinderGetUpAxis(shape) == zb.CBT_LINEAR_AXIS_Y);
zb.cbtShapeCylinderGetHalfExtentsWithoutMargin(shape, &half_extents.c);
assert(half_extents.c[0] == half_extents.c[2]);
break :blk half_extents;
},
zb.CBT_SHAPE_TYPE_CAPSULE => blk: {
assert(zb.cbtShapeCapsuleGetUpAxis(shape) == zb.CBT_LINEAR_AXIS_Y);
const radius = zb.cbtShapeCapsuleGetRadius(shape);
const half_height = zb.cbtShapeCapsuleGetHalfHeight(shape);
assert(radius == 1.0 and half_height == 1.0);
break :blk Vec3.init(radius, half_height, radius);
},
zb.CBT_SHAPE_TYPE_TRIANGLE_MESH => Vec3.initS(1),
zb.CBT_SHAPE_TYPE_COMPOUND => Vec3.initS(1),
else => blk: {
assert(false);
break :blk Vec3.initS(1);
},
};
entities.append(.{
.body = body,
.base_color_roughness = base_color_roughness,
.size = mesh_size,
.mesh_index = mesh_index,
}) catch unreachable;
const entity_index = @intCast(i32, entities.items.len - 1);
zb.cbtBodySetUserIndex(body, 0, entity_index);
zb.cbtBodySetDamping(body, default_linear_damping, default_angular_damping);
zb.cbtBodySetActivationState(body, zb.CBT_DISABLE_DEACTIVATION);
zb.cbtWorldAddBody(world, body);
}
fn update(demo: *DemoState) void {
demo.frame_stats.update(demo.gctx.window, window_name);
const dt = demo.frame_stats.delta_time;
if (!demo.simulation_is_paused) {
_ = zb.cbtWorldStepSimulation(demo.physics_world, dt, 1, 1.0 / 60.0);
} else if (demo.do_simulation_step) {
_ = zb.cbtWorldStepSimulation(demo.physics_world, 1.0 / 60.0, 1, 1.0 / 60.0);
demo.do_simulation_step = false;
}
common.newImGuiFrame(dt);
c.igSetNextWindowPos(
c.ImVec2{ .x = @intToFloat(f32, demo.gctx.viewport_width) - 600.0 - 20, .y = 20.0 },
c.ImGuiCond_FirstUseEver,
c.ImVec2{ .x = 0.0, .y = 0.0 },
);
c.igSetNextWindowSize(.{ .x = 600.0, .y = -1 }, c.ImGuiCond_Always);
_ = c.igBegin(
"Demo Settings",
null,
c.ImGuiWindowFlags_NoMove | c.ImGuiWindowFlags_NoResize | c.ImGuiWindowFlags_NoSavedSettings,
);
c.igBulletText("", "");
c.igSameLine(0, -1);
c.igTextColored(.{ .x = 0, .y = 0.8, .z = 0, .w = 1 }, "Left Mouse Button", "");
c.igSameLine(0, -1);
c.igText(" : select object", "");
c.igBulletText("", "");
c.igSameLine(0, -1);
c.igTextColored(.{ .x = 0, .y = 0.8, .z = 0, .w = 1 }, "Left Mouse Button + Drag", "");
c.igSameLine(0, -1);
c.igText(" : pick up and move object", "");
c.igBulletText("", "");
c.igSameLine(0, -1);
c.igTextColored(.{ .x = 0, .y = 0.8, .z = 0, .w = 1 }, "Right Mouse Button + Drag", "");
c.igSameLine(0, -1);
c.igText(" : rotate camera", "");
c.igBulletText("", "");
c.igSameLine(0, -1);
c.igTextColored(.{ .x = 0, .y = 0.8, .z = 0, .w = 1 }, "W, A, S, D", "");
c.igSameLine(0, -1);
c.igText(" : move camera", "");
c.igBulletText("", "");
c.igSameLine(0, -1);
c.igTextColored(.{ .x = 0, .y = 0.8, .z = 0, .w = 1 }, "SPACE", "");
c.igSameLine(0, -1);
c.igText(" : shoot", "");
{
_ = c.igCombo_Str(
"##",
&demo.current_scene_index,
"Scene: Collision Shapes\x00Scene: Stack of Boxes\x00Scene: Chains\x00Scene: Constraints\x00\x00",
-1,
);
c.igSameLine(0.0, -1.0);
c.igPushStyleColor_U32(c.ImGuiCol_Text, 0xff_00_ff_ff);
if (c.igButton(" Load Scene ", .{ .x = 0, .y = 0 })) {
demo.physics_objects_pool.destroyAllObjects(demo.physics_world);
demo.entities.clearRetainingCapacity();
demo.connected_bodies.clearRetainingCapacity();
demo.motors.clearRetainingCapacity();
const scene = @intToEnum(Scene, demo.current_scene_index);
switch (scene) {
.scene1 => createScene1(demo.physics_world, demo.physics_objects_pool, &demo.entities, &demo.camera),
.scene2 => createScene2(demo.physics_world, demo.physics_objects_pool, &demo.entities, &demo.camera),
.scene3 => createScene3(demo.physics_world, demo.physics_objects_pool, &demo.entities, &demo.camera),
.scene4 => createScene4(
demo.physics_world,
demo.physics_objects_pool,
&demo.entities,
&demo.camera,
&demo.connected_bodies,
&demo.motors,
),
}
demo.selected_entity_index = 0;
demo.entities.items[demo.selected_entity_index].flags = 1;
}
c.igPopStyleColor(1);
if (c.igCollapsingHeader_TreeNodeFlags("Scene Properties", c.ImGuiTreeNodeFlags_None)) {
var gravity: zb.CbtVector3 = undefined;
zb.cbtWorldGetGravity(demo.physics_world, &gravity);
if (c.igSliderFloat("Gravity", &gravity[1], -15.0, 15.0, null, c.ImGuiSliderFlags_None)) {
zb.cbtWorldSetGravity(demo.physics_world, &gravity);
}
if (c.igButton(
if (demo.simulation_is_paused) " Resume Simulation " else " Pause Simulation ",
.{ .x = 0, .y = 0 },
)) {
demo.simulation_is_paused = !demo.simulation_is_paused;
}
if (demo.simulation_is_paused) {
c.igSameLine(0.0, -1.0);
if (c.igButton(" Step ", .{ .x = 0, .y = 0 })) {
demo.do_simulation_step = true;
}
}
}
}
{
const body = demo.entities.items[demo.selected_entity_index].body;
if (c.igCollapsingHeader_TreeNodeFlags("Object Properties", c.ImGuiTreeNodeFlags_None)) {
var linear_damping = zb.cbtBodyGetLinearDamping(body);
var angular_damping = zb.cbtBodyGetAngularDamping(body);
if (c.igSliderFloat("Linear Damping", &linear_damping, 0.0, 1.0, null, c.ImGuiSliderFlags_None)) {
zb.cbtBodySetDamping(body, linear_damping, angular_damping);
}
if (c.igSliderFloat("Angular Damping", &angular_damping, 0.0, 1.0, null, c.ImGuiSliderFlags_None)) {
zb.cbtBodySetDamping(body, linear_damping, angular_damping);
}
var friction = zb.cbtBodyGetFriction(body);
if (c.igSliderFloat("Friction", &friction, 0.0, 1.0, null, c.ImGuiSliderFlags_None)) {
zb.cbtBodySetFriction(body, friction);
}
var rolling_friction = zb.cbtBodyGetRollingFriction(body);
if (c.igSliderFloat("Rolling Friction", &rolling_friction, 0.0, 1.0, null, c.ImGuiSliderFlags_None)) {
zb.cbtBodySetRollingFriction(body, rolling_friction);
}
var restitution = zb.cbtBodyGetRestitution(body);
if (c.igSliderFloat("Restitution", &restitution, 0.0, 1.0, null, c.ImGuiSliderFlags_None)) {
zb.cbtBodySetRestitution(body, restitution);
}
const mass_flag = if (zb.cbtBodyIsStaticOrKinematic(body))
c.ImGuiInputTextFlags_ReadOnly
else
c.ImGuiInputTextFlags_EnterReturnsTrue;
var mass = zb.cbtBodyGetMass(body);
if (c.igInputFloat("Mass", &mass, 1.0, 1.0, null, mass_flag)) {
var inertia = zb.CbtVector3{ 0, 0, 0 };
if (mass > 0.0) {
zb.cbtShapeCalculateLocalInertia(zb.cbtBodyGetShape(body), mass, &inertia);
}
_ = c.igInputFloat3("Inertia", &inertia, null, c.ImGuiInputTextFlags_ReadOnly);
zb.cbtBodySetMassProps(body, mass, &inertia);
}
}
if (demo.motors.items.len > 0) {
const selected_body = demo.entities.items[demo.selected_entity_index].body;
if (zb.cbtBodyGetNumConstraints(selected_body) > 0) {
const constraint = zb.cbtBodyGetConstraint(selected_body, 0);
if (zb.cbtConGetType(constraint) == zb.CBT_CONSTRAINT_TYPE_SLIDER and
zb.cbtConSliderIsAngularMotorEnabled(constraint))
{
if (c.igCollapsingHeader_TreeNodeFlags("Motor Properties", c.ImGuiTreeNodeFlags_None)) {
var angular_velocity: zb.CbtVector3 = undefined;
zb.cbtBodyGetAngularVelocity(selected_body, &angular_velocity);
_ = c.igInputFloat3(
"Angular Velocity",
&angular_velocity,
null,
c.ImGuiInputTextFlags_ReadOnly,
);
var target_velocity: f32 = undefined;
var max_force: f32 = undefined;
zb.cbtConSliderGetAngularMotor(constraint, &target_velocity, &max_force);
if (c.igSliderFloat(
"Target Velocity",
&target_velocity,
0.0,
10.0,
null,
c.ImGuiSliderFlags_None,
)) {
zb.cbtConSliderEnableAngularMotor(constraint, true, target_velocity, max_force);
}
if (c.igSliderFloat(
"Max Force",
&max_force,
0.0,
100.0,
null,
c.ImGuiSliderFlags_None,
)) {
zb.cbtConSliderEnableAngularMotor(constraint, true, target_velocity, max_force);
}
}
}
}
}
}
c.igEnd();
if (demo.simulation_is_paused and demo.selected_entity_index > 0) { // index 0 is static world
const body = demo.entities.items[demo.selected_entity_index].body;
var linear_velocity: zb.CbtVector3 = undefined;
var angular_velocity: zb.CbtVector3 = undefined;
var position: zb.CbtVector3 = undefined;
zb.cbtBodyGetLinearVelocity(body, &linear_velocity);
zb.cbtBodyGetAngularVelocity(body, &angular_velocity);
zb.cbtBodyGetCenterOfMassPosition(body, &position);
const p1_linear = (Vec3{ .c = position }).add(Vec3{ .c = linear_velocity }).c;
const p1_angular = (Vec3{ .c = position }).add(Vec3{ .c = angular_velocity }).c;
const color_linear = zb.CbtVector3{ 1.0, 0.0, 1.0 };
const color_angular = zb.CbtVector3{ 0.0, 1.0, 1.0 };
zb.cbtWorldDebugDrawLine1(demo.physics_world, &position, &p1_linear, &color_linear);
zb.cbtWorldDebugDrawLine1(demo.physics_world, &position, &p1_angular, &color_angular);
}
// Handle camera rotation with mouse.
{
var pos: w32.POINT = undefined;
_ = w32.GetCursorPos(&pos);
const delta_x = @intToFloat(f32, pos.x) - @intToFloat(f32, demo.mouse.cursor_prev_x);
const delta_y = @intToFloat(f32, pos.y) - @intToFloat(f32, demo.mouse.cursor_prev_y);
demo.mouse.cursor_prev_x = pos.x;
demo.mouse.cursor_prev_y = pos.y;
if (w32.GetAsyncKeyState(w32.VK_RBUTTON) < 0) {
demo.camera.pitch += 0.0025 * delta_y;
demo.camera.yaw += 0.0025 * delta_x;
demo.camera.pitch = math.min(demo.camera.pitch, 0.48 * math.pi);
demo.camera.pitch = math.max(demo.camera.pitch, -0.48 * math.pi);
demo.camera.yaw = vm.modAngle(demo.camera.yaw);
}
}
// Handle camera movement with 'WASD' keys.
{
const speed: f32 = 5.0;
const delta_time = demo.frame_stats.delta_time;
const transform = Mat4.initRotationX(demo.camera.pitch).mul(Mat4.initRotationY(demo.camera.yaw));
var forward = Vec3.init(0.0, 0.0, 1.0).transform(transform).normalize();
demo.camera.forward = forward;
const right = Vec3.init(0.0, 1.0, 0.0).cross(forward).normalize().scale(speed * delta_time);
forward = forward.scale(speed * delta_time);
if (w32.GetAsyncKeyState('W') < 0) {
demo.camera.position = demo.camera.position.add(forward);
} else if (w32.GetAsyncKeyState('S') < 0) {
demo.camera.position = demo.camera.position.sub(forward);
}
if (w32.GetAsyncKeyState('D') < 0) {
demo.camera.position = demo.camera.position.add(right);
} else if (w32.GetAsyncKeyState('A') < 0) {
demo.camera.position = demo.camera.position.sub(right);
}
}
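// Shoot: throttle SPACE to one projectile every 0.5 s, spawning a sphere at the camera
// and flinging it along the view direction.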
demo.keyboard_delay += dt;
if (demo.keyboard_delay >= 0.5) {
if (w32.GetAsyncKeyState(w32.VK_SPACE) < 0) {
demo.keyboard_delay = 0.0;
const body = demo.physics_objects_pool.getBody();
zb.cbtBodyCreate(body, 2.0, &Mat4.initTranslation(demo.camera.position).toArray4x3(), shape_sphere_r1);
zb.cbtBodyApplyCentralImpulse(body, &demo.camera.forward.scale(100.0).c);
createAddEntity(
demo.physics_world,
body,
Vec4.init(0, 1.0, 0.0, 0.7),
&demo.entities,
);
}
}
const mouse_button_is_down = c.igIsMouseDown(c.ImGuiMouseButton_Left) and !c.igGetIO().?.*.WantCaptureMouse;
const ray_from = demo.camera.position;
const ray_to = blk: {
var pos: w32.POINT = undefined;
_ = w32.GetCursorPos(&pos);
_ = w32.ScreenToClient(demo.gctx.window, &pos);
const mousex = @intToFloat(f32, pos.x);
const mousey = @intToFloat(f32, pos.y);
const far_plane: f32 = 10000.0;
const tanfov = @tan(0.5 * camera_fovy);
const width = @intToFloat(f32, demo.gctx.viewport_width);
const height = @intToFloat(f32, demo.gctx.viewport_height);
const aspect = width / height;
const ray_forward = demo.camera.forward.scale(far_plane);
var hor = Vec3.init(0, 1, 0).cross(ray_forward).normalize();
var vertical = hor.cross(ray_forward).normalize();
hor = hor.scale(2.0 * far_plane * tanfov * aspect);
vertical = vertical.scale(2.0 * far_plane * tanfov);
const ray_to_center = ray_from.add(ray_forward);
const dhor = hor.scale(1.0 / width);
const dvert = vertical.scale(1.0 / height);
var ray_to = ray_to_center.sub(hor.scale(0.5)).sub(vertical.scale(0.5));
ray_to = ray_to.add(dhor.scale(mousex));
ray_to = ray_to.add(dvert.scale(mousey));
break :blk ray_to;
};
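// No pick active: ray-test the scene and, on a hit, select the entity; for dynamic bodies
// also attach a point-to-point picking constraint at the hit point.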
if (!zb.cbtConIsCreated(demo.pick.constraint) and mouse_button_is_down) {
var result: zb.CbtRayCastResult = undefined;
const hit = zb.cbtRayTestClosest(
demo.physics_world,
&ray_from.c,
&ray_to.c,
zb.CBT_COLLISION_FILTER_DEFAULT,
zb.CBT_COLLISION_FILTER_ALL,
zb.CBT_RAYCAST_FLAG_USE_USE_GJK_CONVEX_TEST,
&result,
);
if (hit and result.body != null) {
demo.pick.body = result.body;
demo.entities.items[demo.selected_entity_index].flags = 0;
const entity_index = zb.cbtBodyGetUserIndex(result.body, 0);
demo.entities.items[@intCast(u32, entity_index)].flags = 1;
demo.selected_entity_index = @intCast(u32, entity_index);
if (!zb.cbtBodyIsStaticOrKinematic(result.body)) {
demo.pick.saved_linear_damping = zb.cbtBodyGetLinearDamping(result.body);
demo.pick.saved_angular_damping = zb.cbtBodyGetAngularDamping(result.body);
zb.cbtBodySetDamping(result.body, 0.4, 0.4);
var inv_trans: [4]zb.CbtVector3 = undefined;
zb.cbtBodyGetInvCenterOfMassTransform(result.body, &inv_trans);
const hit_point_world = Vec3{ .c = result.hit_point_world };
const pivot_a = hit_point_world.transform(Mat4.initArray4x3(inv_trans));
zb.cbtConPoint2PointCreate1(demo.pick.constraint, result.body, &pivot_a.c);
zb.cbtConPoint2PointSetImpulseClamp(demo.pick.constraint, 30.0);
zb.cbtConPoint2PointSetTau(demo.pick.constraint, 0.001);
zb.cbtConSetDebugDrawSize(demo.pick.constraint, 0.15);
zb.cbtWorldAddConstraint(demo.physics_world, demo.pick.constraint, true);
demo.pick.distance = hit_point_world.sub(ray_from).length();
}
}
} else if (zb.cbtConIsCreated(demo.pick.constraint)) {
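// A pick is active: drag the constraint's pivot along the ray at the original hit distance
// and debug-draw the link.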
const to = ray_from.add(ray_to.normalize().scale(demo.pick.distance));
zb.cbtConPoint2PointSetPivotB(demo.pick.constraint, &to.c);
const body_a = zb.cbtConGetBodyA(demo.pick.constraint);
const body_b = zb.cbtConGetBodyB(demo.pick.constraint);
var trans_a: [4]zb.CbtVector3 = undefined;
var trans_b: [4]zb.CbtVector3 = undefined;
zb.cbtBodyGetCenterOfMassTransform(body_a, &trans_a);
zb.cbtBodyGetCenterOfMassTransform(body_b, &trans_b);
var pivot_a: zb.CbtVector3 = undefined;
var pivot_b: zb.CbtVector3 = undefined;
zb.cbtConPoint2PointGetPivotA(demo.pick.constraint, &pivot_a);
zb.cbtConPoint2PointGetPivotB(demo.pick.constraint, &pivot_b);
const position_a = (Vec3{ .c = pivot_a }).transform(Mat4.initArray4x3(trans_a));
const position_b = (Vec3{ .c = pivot_b }).transform(Mat4.initArray4x3(trans_b));
const color0 = zb.CbtVector3{ 1.0, 1.0, 0.0 };
const color1 = zb.CbtVector3{ 1.0, 0.0, 0.0 };
zb.cbtWorldDebugDrawLine2(demo.physics_world, &position_a.c, &position_b.c, &color0, &color1);
const color2 = zb.CbtVector3{ 0.0, 1.0, 0.0 };
zb.cbtWorldDebugDrawSphere(demo.physics_world, &position_a.c, 0.05, &color2);
}
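// Mouse released: tear down the picking constraint and restore the body's original damping.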
if (!mouse_button_is_down and zb.cbtConIsCreated(demo.pick.constraint)) {
zb.cbtWorldRemoveConstraint(demo.physics_world, demo.pick.constraint);
zb.cbtConDestroy(demo.pick.constraint);
zb.cbtBodySetDamping(demo.pick.body, demo.pick.saved_linear_damping, demo.pick.saved_angular_damping);
demo.pick.body = null;
}
// Draw Point2Point constraints as lines
{
const num_constraints: i32 = zb.cbtWorldGetNumConstraints(demo.physics_world);
var i: i32 = 0;
while (i < num_constraints) : (i += 1) {
const constraint = zb.cbtWorldGetConstraint(demo.physics_world, i);
if (zb.cbtConGetType(constraint) != zb.CBT_CONSTRAINT_TYPE_POINT2POINT) continue;
if (constraint == demo.pick.constraint) continue;
const body_a = zb.cbtConGetBodyA(constraint);
const body_b = zb.cbtConGetBodyB(constraint);
if (body_a == zb.cbtConGetFixedBody() or body_b == zb.cbtConGetFixedBody()) continue;
var trans_a: [4]zb.CbtVector3 = undefined;
var trans_b: [4]zb.CbtVector3 = undefined;
zb.cbtBodyGetCenterOfMassTransform(body_a, &trans_a);
zb.cbtBodyGetCenterOfMassTransform(body_b, &trans_b);
var pivot_a: zb.CbtVector3 = undefined;
var pivot_b: zb.CbtVector3 = undefined;
zb.cbtConPoint2PointGetPivotA(constraint, &pivot_a);
zb.cbtConPoint2PointGetPivotB(constraint, &pivot_b);
var body_position_a: zb.CbtVector3 = undefined;
var body_position_b: zb.CbtVector3 = undefined;
zb.cbtBodyGetCenterOfMassPosition(body_a, &body_position_a);
zb.cbtBodyGetCenterOfMassPosition(body_b, &body_position_b);
const position_a = (Vec3{ .c = pivot_a }).transform(Mat4.initArray4x3(trans_a));
const position_b = (Vec3{ .c = pivot_b }).transform(Mat4.initArray4x3(trans_b));
const color = zb.CbtVector3{ 1.0, 1.0, 0.0 };
zb.cbtWorldDebugDrawLine1(demo.physics_world, &position_a.c, &position_b.c, &color);
zb.cbtWorldDebugDrawLine1(demo.physics_world, &body_position_a, &position_a.c, &color);
zb.cbtWorldDebugDrawLine1(demo.physics_world, &body_position_b, &position_b.c, &color);
}
}
// Draw lines that connect 'connected_bodies'
{
var i: u32 = 0;
const num_bodies = @intCast(u32, demo.connected_bodies.items.len);
while (i < num_bodies) : (i += 2) {
const body0 = demo.connected_bodies.items[i].body;
const body1 = demo.connected_bodies.items[i + 1].body;
const pivot0 = demo.connected_bodies.items[i].pivot;
const pivot1 = demo.connected_bodies.items[i + 1].pivot;
var trans0: [4]zb.CbtVector3 = undefined;
var trans1: [4]zb.CbtVector3 = undefined;
zb.cbtBodyGetCenterOfMassTransform(body0, &trans0);
zb.cbtBodyGetCenterOfMassTransform(body1, &trans1);
const color = zb.CbtVector3{ 1.0, 1.0, 1.0 };
const p0 = pivot0.transform(Mat4.initArray4x3(trans0));
const p1 = pivot1.transform(Mat4.initArray4x3(trans1));
zb.cbtWorldDebugDrawLine1(demo.physics_world, &p0.c, &p1.c, &color);
}
}
}
fn draw(demo: *DemoState) void {
var gctx = &demo.gctx;
gctx.beginFrame();
const cam_world_to_view = Mat4.initLookToLh(
demo.camera.position,
demo.camera.forward,
Vec3.init(0.0, 1.0, 0.0),
);
const cam_view_to_clip = Mat4.initPerspectiveFovLh(
camera_fovy,
@intToFloat(f32, gctx.viewport_width) / @intToFloat(f32, gctx.viewport_height),
0.01,
200.0,
);
const cam_world_to_clip = cam_world_to_view.mul(cam_view_to_clip);
gctx.addTransitionBarrier(demo.color_texture, d3d12.RESOURCE_STATE_RENDER_TARGET);
gctx.flushResourceBarriers();
gctx.cmdlist.OMSetRenderTargets(
1,
&[_]d3d12.CPU_DESCRIPTOR_HANDLE{demo.color_texture_rtv},
w32.TRUE,
&demo.depth_texture_dsv,
);
gctx.cmdlist.ClearDepthStencilView(demo.depth_texture_dsv, d3d12.CLEAR_FLAG_DEPTH, 1.0, 0, 0, null);
gctx.cmdlist.ClearRenderTargetView(
demo.color_texture_rtv,
&[4]f32{ 0.0, 0.0, 0.0, 1.0 },
0,
null,
);
{
gctx.cmdlist.IASetVertexBuffers(0, 1, &[_]d3d12.VERTEX_BUFFER_VIEW{.{
.BufferLocation = gctx.lookupResource(demo.vertex_buffer).?.GetGPUVirtualAddress(),
.SizeInBytes = @intCast(u32, gctx.getResourceSize(demo.vertex_buffer)),
.StrideInBytes = @sizeOf(Vertex),
}});
gctx.cmdlist.IASetIndexBuffer(&.{
.BufferLocation = gctx.lookupResource(demo.index_buffer).?.GetGPUVirtualAddress(),
.SizeInBytes = @intCast(u32, gctx.getResourceSize(demo.index_buffer)),
.Format = .R32_UINT,
});
gctx.setCurrentPipeline(demo.simple_entity_pso);
gctx.cmdlist.IASetPrimitiveTopology(.TRIANGLELIST);
{
const mem = gctx.allocateUploadMemory(PsoSimpleEntity_FrameConst, 1);
mem.cpu_slice[0].world_to_clip = cam_world_to_clip.transpose();
mem.cpu_slice[0].camera_position = demo.camera.position;
gctx.cmdlist.SetGraphicsRootConstantBufferView(1, mem.gpu_base);
}
//
// Draw all entities
//
for (demo.entities.items) |entity| {
if (entity.mesh_index == mesh_compound) { // Meshes that consist of multiple simple shapes
const world_transform = blk: {
var transform: [4]zb.CbtVector3 = undefined;
zb.cbtBodyGetGraphicsWorldTransform(entity.body, &transform);
break :blk Mat4.initArray4x3(transform);
};
const shape = zb.cbtBodyGetShape(entity.body);
const num_childs = zb.cbtShapeCompoundGetNumChilds(shape);
var child_index: i32 = 0;
while (child_index < num_childs) : (child_index += 1) {
const local_transform = blk: {
var transform: [4]zb.CbtVector3 = undefined;
zb.cbtShapeCompoundGetChildTransform(shape, child_index, &transform);
break :blk Mat4.initArray4x3(transform);
};
const child_shape = zb.cbtShapeCompoundGetChild(shape, child_index);
const mesh_index = switch (zb.cbtShapeGetType(child_shape)) {
zb.CBT_SHAPE_TYPE_BOX => mesh_cube,
zb.CBT_SHAPE_TYPE_CYLINDER => mesh_cylinder,
zb.CBT_SHAPE_TYPE_SPHERE => mesh_sphere,
else => blk: {
assert(false);
break :blk 0;
},
};
const mesh_size = switch (zb.cbtShapeGetType(child_shape)) {
zb.CBT_SHAPE_TYPE_BOX => blk: {
var half_extents: Vec3 = undefined;
zb.cbtShapeBoxGetHalfExtentsWithoutMargin(child_shape, &half_extents.c);
break :blk half_extents;
},
zb.CBT_SHAPE_TYPE_CYLINDER => blk: {
assert(zb.cbtShapeCylinderGetUpAxis(child_shape) == zb.CBT_LINEAR_AXIS_Y);
var half_extents: Vec3 = undefined;
zb.cbtShapeCylinderGetHalfExtentsWithoutMargin(child_shape, &half_extents.c);
assert(half_extents.c[0] == half_extents.c[2]);
break :blk half_extents;
},
zb.CBT_SHAPE_TYPE_SPHERE => blk: {
const radius = zb.cbtShapeSphereGetRadius(child_shape);
break :blk Vec3.initS(radius);
},
else => blk: {
assert(false);
break :blk Vec3.initS(1);
},
};
const scaling = Mat4.initScaling(mesh_size);
const mem = gctx.allocateUploadMemory(PsoSimpleEntity_DrawConst, 1);
mem.cpu_slice[0].object_to_world = scaling.mul(local_transform.mul(world_transform)).transpose();
mem.cpu_slice[0].base_color_roughness = entity.base_color_roughness;
mem.cpu_slice[0].flags = entity.flags;
gctx.cmdlist.SetGraphicsRootConstantBufferView(0, mem.gpu_base);
gctx.cmdlist.DrawIndexedInstanced(
demo.meshes.items[mesh_index].num_indices,
1,
demo.meshes.items[mesh_index].index_offset,
@intCast(i32, demo.meshes.items[mesh_index].vertex_offset),
0,
);
}
} else { // Meshes that consist of a single shape
var transform: [4]zb.CbtVector3 = undefined;
zb.cbtBodyGetGraphicsWorldTransform(entity.body, &transform);
const scaling = Mat4.initScaling(entity.size);
const mem = gctx.allocateUploadMemory(PsoSimpleEntity_DrawConst, 1);
mem.cpu_slice[0].object_to_world = scaling.mul(Mat4.initArray4x3(transform)).transpose();
mem.cpu_slice[0].base_color_roughness = entity.base_color_roughness;
mem.cpu_slice[0].flags = entity.flags;
gctx.cmdlist.SetGraphicsRootConstantBufferView(0, mem.gpu_base);
gctx.cmdlist.DrawIndexedInstanced(
demo.meshes.items[entity.mesh_index].num_indices,
1,
demo.meshes.items[entity.mesh_index].index_offset,
@intCast(i32, demo.meshes.items[entity.mesh_index].vertex_offset),
0,
);
}
}
}
zb.cbtWorldDebugDrawAll(demo.physics_world);
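// The debug-draw pass above collected line vertices in physics_debug; flush them through
// the line-list pipeline.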
if (demo.physics_debug.lines.items.len > 0) {
gctx.setCurrentPipeline(demo.physics_debug_pso);
gctx.cmdlist.IASetPrimitiveTopology(.LINELIST);
{
const mem = gctx.allocateUploadMemory(Mat4, 1);
mem.cpu_slice[0] = cam_world_to_clip.transpose();
gctx.cmdlist.SetGraphicsRootConstantBufferView(0, mem.gpu_base);
}
const num_vertices = @intCast(u32, demo.physics_debug.lines.items.len);
{
const mem = gctx.allocateUploadMemory(PsoPhysicsDebug_Vertex, num_vertices);
for (demo.physics_debug.lines.items) |p, i| {
mem.cpu_slice[i] = p;
}
gctx.cmdlist.SetGraphicsRootShaderResourceView(1, mem.gpu_base);
}
gctx.cmdlist.DrawInstanced(num_vertices, 1, 0, 0);
demo.physics_debug.lines.clearRetainingCapacity();
}
demo.guir.draw(gctx);
const back_buffer = gctx.getBackBuffer();
gctx.addTransitionBarrier(back_buffer.resource_handle, d3d12.RESOURCE_STATE_RESOLVE_DEST);
gctx.addTransitionBarrier(demo.color_texture, d3d12.RESOURCE_STATE_RESOLVE_SOURCE);
gctx.flushResourceBarriers();
gctx.cmdlist.ResolveSubresource(
gctx.lookupResource(back_buffer.resource_handle).?,
0,
gctx.lookupResource(demo.color_texture).?,
0,
.R8G8B8A8_UNORM,
);
gctx.addTransitionBarrier(back_buffer.resource_handle, d3d12.RESOURCE_STATE_PRESENT);
gctx.flushResourceBarriers();
gctx.endFrame();
}
pub fn main() !void {
common.init();
defer common.deinit();
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
var demo = try init(allocator);
defer deinit(&demo, allocator);
while (common.handleWindowEvents()) {
update(&demo);
draw(&demo);
}
} | samples/bullet_physics_test/src/bullet_physics_test.zig |
const std = @import("std");
const print = std.debug.print;
const util = @import("util.zig");
const gpa = util.gpa;
const data = @embedFile("../data/day09.txt");
pub fn main() !void {
var timer = try std.time.Timer.start();
var arrayList = std.ArrayList(u8).init(gpa);
defer arrayList.deinit();
var lines = std.mem.tokenize(data, "\r\n");
var maybeXWidth : ?usize = null;
while (lines.next()) |line| {
if (maybeXWidth == null) {
maybeXWidth = line.len + 2;
// If we're here it means we are the first row, so we pad in some 9's to
// make the future calculation easier (fewer checks).
var index : usize = 0;
while (index < maybeXWidth.?) : (index += 1) {
try arrayList.append(9);
}
} else {
std.debug.assert(maybeXWidth.? == (line.len + 2));
}
// We start each row with a 9.
try arrayList.append(9);
for (line) |c| {
try arrayList.append(c - '0');
}
// And end each row with a 9.
try arrayList.append(9);
}
// Now pad in a last row of 9's too.
{
var index : usize = 0;
while (index < maybeXWidth.?) : (index += 1) {
try arrayList.append(9);
}
}
const xWidth = maybeXWidth.?;
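// Part 1: thanks to the 9-border every cell can test its four neighbours without bounds
// checks; a low point is strictly lower than all of them.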
{
var sum : u32 = 0;
for (arrayList.items) |item, index| {
// 9's can never be low points so skip them.
if (item == 9) {
continue;
}
if (item >= arrayList.items[index - 1]) {
continue;
}
if (item >= arrayList.items[index + 1]) {
continue;
}
if (item >= arrayList.items[index - xWidth]) {
continue;
}
if (item >= arrayList.items[index + xWidth]) {
continue;
}
// We've got a low point!
sum += 1 + item;
}
print("🎁 Sum of low points: {}\n", .{sum});
print("Day 09 - part 01 took {:15}ns\n", .{timer.lap()});
timer.reset();
}
{
var threeLargestBasins = [_]usize{0} ** 3;
for (arrayList.items) |item, index| {
const count = countAndWipe(index, xWidth, arrayList);
if (count != 0) {
var smallestIndex : usize = 0;
for (threeLargestBasins) |basin, i| {
if (threeLargestBasins[smallestIndex] > basin) {
smallestIndex = i;
}
}
if (threeLargestBasins[smallestIndex] < count) {
threeLargestBasins[smallestIndex] = count;
}
}
}
const total = threeLargestBasins[0] *
threeLargestBasins[1] *
threeLargestBasins[2];
print("🎁 Three largest basins sum: {}\n", .{total});
print("Day 09 - part 02 took {:15}ns\n", .{timer.lap()});
print("❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️\n", .{});
}
}
fn countAndWipe(index : usize, xWidth : usize, map : std.ArrayList(u8)) usize {
// 9's are not in a basin so do not form part of the count.
if (map.items[index] == 9) {
return 0;
}
// We are included in the count so it starts as 1.
var count : usize = 1;
// Wipe ourselves from the map so we don't accidentally count ourselves twice.
map.items[index] = 9;
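// The 9-border guarantees all four neighbour indices stay in bounds.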
const neighbours = [4]usize {
index - 1,
index + 1,
index - xWidth,
index + xWidth,
};
for (neighbours) |neighbour| {
count += countAndWipe(neighbour, xWidth, map);
}
return count;
} | src/day09.zig |
const builtin = @import("builtin");
const std = @import("std");
const opcodes = @import("opcodes.zig");
const Instruction = opcodes.Instruction;
const object = @import("object.zig");
const Function = object.Function;
const Constant = object.Constant;
pub const signature = "\x1BLua";
pub const luac_version: u8 = 0x51;
pub const luac_format: u8 = 0;
pub const luac_headersize = 12;
// TODO: make this actually optional, once we are capable of outputting debug info
const strip_debug_info = true;
pub fn write(chunk: Function, writer: anytype) @TypeOf(writer).Error!void {
try writeHeader(writer);
try writeFunction(chunk, writer);
}
pub fn writeHeader(writer: anytype) @TypeOf(writer).Error!void {
try writer.writeAll(signature);
try writer.writeByte(luac_version);
try writer.writeByte(luac_format);
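// endianness flag: 1 when targeting little-endian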
try writer.writeByte(@boolToInt(builtin.target.cpu.arch.endian() == .Little));
try writer.writeByte(@sizeOf(c_int));
try writer.writeByte(@sizeOf(usize));
try writer.writeByte(@sizeOf(opcodes.Instruction));
try writer.writeByte(@sizeOf(f64)); // sizeof(lua_Number)
try writer.writeByte(@boolToInt(false)); // is lua_Number an integer type?
}
pub fn writeFunction(function: Function, writer: anytype) @TypeOf(writer).Error!void {
// source info
const chunk_name: ?[]const u8 = if (strip_debug_info) null else function.name;
try writeString(chunk_name, writer);
try writer.writeIntNative(c_int, 0); // TODO: line defined
try writer.writeIntNative(c_int, 0); // TODO: last line defined
try writer.writeByte(function.num_upvalues);
try writer.writeByte(function.num_params);
try writer.writeByte(function.varargs.dump());
try writer.writeByte(function.max_stack_size);
// instructions
try writer.writeIntNative(c_int, @intCast(c_int, function.code.len));
try writer.writeAll(std.mem.sliceAsBytes(function.code));
// constants
// number of constants
try writer.writeIntNative(c_int, @intCast(c_int, function.constants.len));
// each constant is dumped as a byte for its type followed by a dump of the value
for (function.constants) |constant| {
switch (constant) {
.string => |string_literal| {
try writer.writeByte(object.Value.Type.string.bytecodeId());
try writeString(string_literal, writer);
},
.number => |number_literal| {
try writer.writeByte(object.Value.Type.number.bytecodeId());
try writer.writeAll(std.mem.asBytes(&number_literal));
},
.nil => {
try writer.writeByte(object.Value.Type.nil.bytecodeId());
},
.boolean => |val| {
try writer.writeByte(object.Value.Type.boolean.bytecodeId());
try writer.writeByte(@boolToInt(val));
},
}
}
// number of functions
try writer.writeIntNative(c_int, 0);
// TODO: functions
// debug
try writer.writeIntNative(c_int, 0); // TODO: sizelineinfo
// TODO: lineinfo
try writer.writeIntNative(c_int, 0); // TODO: sizelocvars
// TODO: locvars
try writer.writeIntNative(c_int, 0); // TODO: sizeupvalues
// TODO: upvalues
}
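/// Strings are dumped as a usize length (including the NUL terminator) followed by the
/// bytes and a trailing 0; a null string is written as length 0.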
pub fn writeString(string: ?[]const u8, writer: anytype) @TypeOf(writer).Error!void {
if (string == null) {
try writer.writeIntNative(usize, 0);
} else {
try writer.writeIntNative(usize, string.?.len + 1);
try writer.writeAll(string.?);
try writer.writeByte(0);
}
}
test "header" {
var buf = std.ArrayList(u8).init(std.testing.allocator);
defer buf.deinit();
try writeHeader(buf.writer());
}
test "just return" {
var buf = std.ArrayList(u8).init(std.testing.allocator);
defer buf.deinit();
var chunk = Function{
.name = "",
.code = &[_]Instruction{
@bitCast(Instruction, Instruction.ABC.init(.@"return", 0, 1, 0)),
},
.constants = &[_]Constant{},
.max_stack_size = 0,
};
try write(chunk, buf.writer());
}
test "hello world" {
var buf = std.ArrayList(u8).init(std.testing.allocator);
defer buf.deinit();
var chunk = Function{
.allocator = null,
.name = "",
.code = &[_]Instruction{
@bitCast(Instruction, Instruction.ABx.init(.getglobal, 0, 0)),
@bitCast(Instruction, Instruction.ABx.init(.loadk, 1, 1)),
@bitCast(Instruction, Instruction.ABC.init(.call, 0, 2, 1)),
@bitCast(Instruction, Instruction.ABC.init(.@"return", 0, 1, 0)),
},
.constants = &[_]Constant{
Constant{ .string = "print" },
Constant{ .string = "hello world" },
},
.max_stack_size = 2,
};
try write(chunk, buf.writer());
}
test "constants" {
var buf = std.ArrayList(u8).init(std.testing.allocator);
defer buf.deinit();
var chunk = Function{
.allocator = null,
.name = "",
.code = &[_]Instruction{
@bitCast(Instruction, Instruction.ABC.init(.@"return", 0, 1, 0)),
},
.constants = &[_]Constant{
Constant{ .string = "print" },
Constant{ .string = "hello world" },
Constant{ .boolean = true },
Constant{ .boolean = false },
Constant.nil,
Constant{ .number = 123 },
},
.max_stack_size = 0,
};
try write(chunk, buf.writer());
//std.debug.print("{e}\n", .{buf.items});
} | src/dump.zig |
pub const WSB_MAX_OB_STATUS_VALUE_TYPE_PAIR = @as(u32, 5);
pub const WSB_MAX_OB_STATUS_ENTRY = @as(u32, 5);
//--------------------------------------------------------------------------------
// Section: Types (8)
//--------------------------------------------------------------------------------
// TODO: this type is limited to platform 'windowsServer2008'
const IID_IWsbApplicationBackupSupport_Value = @import("../zig.zig").Guid.initString("1eff3510-4a27-46ad-b9e0-08332f0f4f6d");
pub const IID_IWsbApplicationBackupSupport = &IID_IWsbApplicationBackupSupport_Value;
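// COM interface layout: the struct holds a pointer to its vtable, and MethodMixin
// generates Zig-friendly wrappers that forward calls through the vtable slots.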
pub const IWsbApplicationBackupSupport = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
CheckConsistency: fn(
self: *const IWsbApplicationBackupSupport,
wszWriterMetadata: ?PWSTR,
wszComponentName: ?PWSTR,
wszComponentLogicalPath: ?PWSTR,
cVolumes: u32,
rgwszSourceVolumePath: [*]?PWSTR,
rgwszSnapshotVolumePath: [*]?PWSTR,
ppAsync: ?*?*IWsbApplicationAsync,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWsbApplicationBackupSupport_CheckConsistency(self: *const T, wszWriterMetadata: ?PWSTR, wszComponentName: ?PWSTR, wszComponentLogicalPath: ?PWSTR, cVolumes: u32, rgwszSourceVolumePath: [*]?PWSTR, rgwszSnapshotVolumePath: [*]?PWSTR, ppAsync: ?*?*IWsbApplicationAsync) callconv(.Inline) HRESULT {
return @ptrCast(*const IWsbApplicationBackupSupport.VTable, self.vtable).CheckConsistency(@ptrCast(*const IWsbApplicationBackupSupport, self), wszWriterMetadata, wszComponentName, wszComponentLogicalPath, cVolumes, rgwszSourceVolumePath, rgwszSnapshotVolumePath, ppAsync);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windowsServer2008'
const IID_IWsbApplicationRestoreSupport_Value = @import("../zig.zig").Guid.initString("8d3bdb38-4ee8-4718-85f9-c7dbc4ab77aa");
pub const IID_IWsbApplicationRestoreSupport = &IID_IWsbApplicationRestoreSupport_Value;
pub const IWsbApplicationRestoreSupport = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
PreRestore: fn(
self: *const IWsbApplicationRestoreSupport,
wszWriterMetadata: ?PWSTR,
wszComponentName: ?PWSTR,
wszComponentLogicalPath: ?PWSTR,
bNoRollForward: BOOLEAN,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
PostRestore: fn(
self: *const IWsbApplicationRestoreSupport,
wszWriterMetadata: ?PWSTR,
wszComponentName: ?PWSTR,
wszComponentLogicalPath: ?PWSTR,
bNoRollForward: BOOLEAN,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OrderComponents: fn(
self: *const IWsbApplicationRestoreSupport,
cComponents: u32,
rgComponentName: [*]?PWSTR,
rgComponentLogicalPaths: [*]?PWSTR,
prgComponentName: [*]?*?PWSTR,
prgComponentLogicalPath: [*]?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsRollForwardSupported: fn(
self: *const IWsbApplicationRestoreSupport,
pbRollForwardSupported: ?*u8,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWsbApplicationRestoreSupport_PreRestore(self: *const T, wszWriterMetadata: ?PWSTR, wszComponentName: ?PWSTR, wszComponentLogicalPath: ?PWSTR, bNoRollForward: BOOLEAN) callconv(.Inline) HRESULT {
return @ptrCast(*const IWsbApplicationRestoreSupport.VTable, self.vtable).PreRestore(@ptrCast(*const IWsbApplicationRestoreSupport, self), wszWriterMetadata, wszComponentName, wszComponentLogicalPath, bNoRollForward);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWsbApplicationRestoreSupport_PostRestore(self: *const T, wszWriterMetadata: ?PWSTR, wszComponentName: ?PWSTR, wszComponentLogicalPath: ?PWSTR, bNoRollForward: BOOLEAN) callconv(.Inline) HRESULT {
return @ptrCast(*const IWsbApplicationRestoreSupport.VTable, self.vtable).PostRestore(@ptrCast(*const IWsbApplicationRestoreSupport, self), wszWriterMetadata, wszComponentName, wszComponentLogicalPath, bNoRollForward);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWsbApplicationRestoreSupport_OrderComponents(self: *const T, cComponents: u32, rgComponentName: [*]?PWSTR, rgComponentLogicalPaths: [*]?PWSTR, prgComponentName: [*]?*?PWSTR, prgComponentLogicalPath: [*]?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IWsbApplicationRestoreSupport.VTable, self.vtable).OrderComponents(@ptrCast(*const IWsbApplicationRestoreSupport, self), cComponents, rgComponentName, rgComponentLogicalPaths, prgComponentName, prgComponentLogicalPath);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWsbApplicationRestoreSupport_IsRollForwardSupported(self: *const T, pbRollForwardSupported: ?*u8) callconv(.Inline) HRESULT {
return @ptrCast(*const IWsbApplicationRestoreSupport.VTable, self.vtable).IsRollForwardSupported(@ptrCast(*const IWsbApplicationRestoreSupport, self), pbRollForwardSupported);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windowsServer2008'
const IID_IWsbApplicationAsync_Value = @import("../zig.zig").Guid.initString("0843f6f7-895c-44a6-b0c2-05a5022aa3a1");
pub const IID_IWsbApplicationAsync = &IID_IWsbApplicationAsync_Value;
pub const IWsbApplicationAsync = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
QueryStatus: fn(
self: *const IWsbApplicationAsync,
phrResult: ?*HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Abort: fn(
self: *const IWsbApplicationAsync,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWsbApplicationAsync_QueryStatus(self: *const T, phrResult: ?*HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const IWsbApplicationAsync.VTable, self.vtable).QueryStatus(@ptrCast(*const IWsbApplicationAsync, self), phrResult);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWsbApplicationAsync_Abort(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IWsbApplicationAsync.VTable, self.vtable).Abort(@ptrCast(*const IWsbApplicationAsync, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
pub const WSB_OB_STATUS_ENTRY_PAIR_TYPE = enum(i32) {
UNDEFINED = 0,
STRING = 1,
NUMBER = 2,
DATETIME = 3,
TIME = 4,
SIZE = 5,
MAX = 6,
};
pub const WSB_OB_ET_UNDEFINED = WSB_OB_STATUS_ENTRY_PAIR_TYPE.UNDEFINED;
pub const WSB_OB_ET_STRING = WSB_OB_STATUS_ENTRY_PAIR_TYPE.STRING;
pub const WSB_OB_ET_NUMBER = WSB_OB_STATUS_ENTRY_PAIR_TYPE.NUMBER;
pub const WSB_OB_ET_DATETIME = WSB_OB_STATUS_ENTRY_PAIR_TYPE.DATETIME;
pub const WSB_OB_ET_TIME = WSB_OB_STATUS_ENTRY_PAIR_TYPE.TIME;
pub const WSB_OB_ET_SIZE = WSB_OB_STATUS_ENTRY_PAIR_TYPE.SIZE;
pub const WSB_OB_ET_MAX = WSB_OB_STATUS_ENTRY_PAIR_TYPE.MAX;
pub const WSB_OB_STATUS_ENTRY_VALUE_TYPE_PAIR = extern struct {
m_wszObStatusEntryPairValue: ?PWSTR,
m_ObStatusEntryPairType: WSB_OB_STATUS_ENTRY_PAIR_TYPE,
};
pub const WSB_OB_STATUS_ENTRY = extern struct {
m_dwIcon: u32,
m_dwStatusEntryName: u32,
m_dwStatusEntryValue: u32,
m_cValueTypePair: u32,
m_rgValueTypePair: ?*WSB_OB_STATUS_ENTRY_VALUE_TYPE_PAIR,
};
pub const WSB_OB_STATUS_INFO = extern struct {
m_guidSnapinId: Guid,
m_cStatusEntry: u32,
m_rgStatusEntry: ?*WSB_OB_STATUS_ENTRY,
};
pub const WSB_OB_REGISTRATION_INFO = extern struct {
m_wszResourceDLL: ?PWSTR,
m_guidSnapinId: Guid,
m_dwProviderName: u32,
m_dwProviderIcon: u32,
m_bSupportsRemoting: BOOLEAN,
};
//--------------------------------------------------------------------------------
// Section: Functions (0)
//--------------------------------------------------------------------------------
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
.ansi => struct {
},
.wide => struct {
},
.unspecified => if (@import("builtin").is_test) struct {
} else struct {
},
};
//--------------------------------------------------------------------------------
// Section: Imports (5)
//--------------------------------------------------------------------------------
const Guid = @import("../zig.zig").Guid;
const BOOLEAN = @import("../foundation.zig").BOOLEAN;
const HRESULT = @import("../foundation.zig").HRESULT;
const IUnknown = @import("../system/com.zig").IUnknown;
const PWSTR = @import("../foundation.zig").PWSTR;
test {
@setEvalBranchQuota(
@import("std").meta.declarations(@This()).len * 3
);
// reference all the pub declarations
if (!@import("builtin").is_test) return;
inline for (@import("std").meta.declarations(@This())) |decl| {
if (decl.is_pub) {
_ = decl;
}
}
} | deps/zigwin32/win32/system/server_backup.zig |
const DebugSymbols = @This();
const std = @import("std");
const build_options = @import("build_options");
const assert = std.debug.assert;
const fs = std.fs;
const link = @import("../../link.zig");
const log = std.log.scoped(.link);
const macho = std.macho;
const makeStaticString = MachO.makeStaticString;
const math = std.math;
const mem = std.mem;
const padToIdeal = MachO.padToIdeal;
const trace = @import("../../tracy.zig").trace;
const Allocator = mem.Allocator;
const Dwarf = @import("../Dwarf.zig");
const MachO = @import("../MachO.zig");
const Module = @import("../../Module.zig");
const TextBlock = MachO.TextBlock;
const Type = @import("../../type.zig").Type;
base: *MachO,
dwarf: Dwarf,
file: fs.File,
/// Table of all load commands
load_commands: std.ArrayListUnmanaged(macho.LoadCommand) = .{},
/// __PAGEZERO segment
pagezero_segment_cmd_index: ?u16 = null,
/// __TEXT segment
text_segment_cmd_index: ?u16 = null,
/// __DATA_CONST segment
data_const_segment_cmd_index: ?u16 = null,
/// __DATA segment
data_segment_cmd_index: ?u16 = null,
/// __LINKEDIT segment
linkedit_segment_cmd_index: ?u16 = null,
/// __DWARF segment
dwarf_segment_cmd_index: ?u16 = null,
/// Symbol table
symtab_cmd_index: ?u16 = null,
/// UUID load command
uuid_cmd_index: ?u16 = null,
/// Index into __TEXT,__text section.
text_section_index: ?u16 = null,
debug_info_section_index: ?u16 = null,
debug_abbrev_section_index: ?u16 = null,
debug_str_section_index: ?u16 = null,
debug_aranges_section_index: ?u16 = null,
debug_line_section_index: ?u16 = null,
load_commands_dirty: bool = false,
debug_string_table_dirty: bool = false,
debug_abbrev_section_dirty: bool = false,
debug_aranges_section_dirty: bool = false,
debug_info_header_dirty: bool = false,
debug_line_header_dirty: bool = false,
/// You must call this function *after* `MachO.populateMissingMetadata()`
/// has been called to get a viable debug symbols output.
pub fn populateMissingMetadata(self: *DebugSymbols, allocator: Allocator) !void {
if (self.uuid_cmd_index == null) {
const base_cmd = self.base.load_commands.items[self.base.uuid_cmd_index.?];
self.uuid_cmd_index = @intCast(u16, self.load_commands.items.len);
try self.load_commands.append(allocator, base_cmd);
self.load_commands_dirty = true;
}
if (self.symtab_cmd_index == null) {
self.symtab_cmd_index = @intCast(u16, self.load_commands.items.len);
try self.load_commands.append(self.base.base.allocator, .{
.symtab = .{
.cmdsize = @sizeOf(macho.symtab_command),
.symoff = 0,
.nsyms = 0,
.stroff = 0,
.strsize = 0,
},
});
self.load_commands_dirty = true;
}
if (self.pagezero_segment_cmd_index == null) {
self.pagezero_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
const base_cmd = self.base.load_commands.items[self.base.pagezero_segment_cmd_index.?].segment;
const cmd = try self.copySegmentCommand(allocator, base_cmd);
try self.load_commands.append(allocator, .{ .segment = cmd });
self.load_commands_dirty = true;
}
if (self.text_segment_cmd_index == null) {
self.text_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
const base_cmd = self.base.load_commands.items[self.base.text_segment_cmd_index.?].segment;
const cmd = try self.copySegmentCommand(allocator, base_cmd);
try self.load_commands.append(allocator, .{ .segment = cmd });
self.load_commands_dirty = true;
}
if (self.data_const_segment_cmd_index == null) outer: {
if (self.base.data_const_segment_cmd_index == null) break :outer; // __DATA_CONST is optional
self.data_const_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
const base_cmd = self.base.load_commands.items[self.base.data_const_segment_cmd_index.?].segment;
const cmd = try self.copySegmentCommand(allocator, base_cmd);
try self.load_commands.append(allocator, .{ .segment = cmd });
self.load_commands_dirty = true;
}
if (self.data_segment_cmd_index == null) outer: {
if (self.base.data_segment_cmd_index == null) break :outer; // __DATA is optional
self.data_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
const base_cmd = self.base.load_commands.items[self.base.data_segment_cmd_index.?].segment;
const cmd = try self.copySegmentCommand(allocator, base_cmd);
try self.load_commands.append(allocator, .{ .segment = cmd });
self.load_commands_dirty = true;
}
if (self.linkedit_segment_cmd_index == null) {
self.linkedit_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
const base_cmd = self.base.load_commands.items[self.base.linkedit_segment_cmd_index.?].segment;
var cmd = try self.copySegmentCommand(allocator, base_cmd);
// TODO this needs reworking
cmd.inner.vmsize = self.base.page_size;
cmd.inner.fileoff = self.base.page_size;
cmd.inner.filesize = self.base.page_size;
try self.load_commands.append(allocator, .{ .segment = cmd });
self.load_commands_dirty = true;
}
if (self.dwarf_segment_cmd_index == null) {
self.dwarf_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
const linkedit = self.load_commands.items[self.linkedit_segment_cmd_index.?].segment;
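// Initial sizes of the __debug_info, __debug_abbrev, __debug_aranges and __debug_line
// sections allocated below.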
const ideal_size: u16 = 200 + 128 + 160 + 250;
const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.base.page_size);
const fileoff = linkedit.inner.fileoff + linkedit.inner.filesize;
const vmaddr = linkedit.inner.vmaddr + linkedit.inner.vmsize;
log.debug("found __DWARF segment free space 0x{x} to 0x{x}", .{ fileoff, fileoff + needed_size });
try self.load_commands.append(allocator, .{
.segment = .{
.inner = .{
.segname = makeStaticString("__DWARF"),
.vmaddr = vmaddr,
.vmsize = needed_size,
.fileoff = fileoff,
.filesize = needed_size,
},
},
});
self.load_commands_dirty = true;
}
if (self.debug_str_section_index == null) {
assert(self.dwarf.strtab.items.len == 0);
self.debug_str_section_index = try self.allocateSection(
"__debug_str",
@intCast(u32, self.dwarf.strtab.items.len),
0,
);
self.debug_string_table_dirty = true;
}
if (self.debug_info_section_index == null) {
self.debug_info_section_index = try self.allocateSection("__debug_info", 200, 0);
self.debug_info_header_dirty = true;
}
if (self.debug_abbrev_section_index == null) {
self.debug_abbrev_section_index = try self.allocateSection("__debug_abbrev", 128, 0);
self.debug_abbrev_section_dirty = true;
}
if (self.debug_aranges_section_index == null) {
self.debug_aranges_section_index = try self.allocateSection("__debug_aranges", 160, 4);
self.debug_aranges_section_dirty = true;
}
if (self.debug_line_section_index == null) {
self.debug_line_section_index = try self.allocateSection("__debug_line", 250, 0);
self.debug_line_header_dirty = true;
}
}
fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignment: u16) !u16 {
const seg = &self.load_commands.items[self.dwarf_segment_cmd_index.?].segment;
var sect = macho.section_64{
.sectname = makeStaticString(sectname),
.segname = seg.inner.segname,
.size = @intCast(u32, size),
.@"align" = alignment,
};
const alignment_pow_2 = try math.powi(u32, 2, alignment);
const off = self.findFreeSpace(size, alignment_pow_2);
assert(off + size <= seg.inner.fileoff + seg.inner.filesize); // TODO expand
log.debug("found {s},{s} section free space 0x{x} to 0x{x}", .{
sect.segName(),
sect.sectName(),
off,
off + size,
});
sect.addr = seg.inner.vmaddr + off - seg.inner.fileoff;
sect.offset = @intCast(u32, off);
const index = @intCast(u16, seg.sections.items.len);
try seg.sections.append(self.base.base.allocator, sect);
seg.inner.cmdsize += @sizeOf(macho.section_64);
seg.inner.nsects += 1;
// TODO
// const match = MatchingSection{
// .seg = segment_id,
// .sect = index,
// };
// _ = try self.section_ordinals.getOrPut(self.base.allocator, match);
// try self.block_free_lists.putNoClobber(self.base.allocator, match, .{});
self.load_commands_dirty = true;
return index;
}
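/// Returns the end of the first section that overlaps the candidate range
/// [start, start + padded size), or null if the range is free.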
fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) ?u64 {
const seg = self.load_commands.items[self.dwarf_segment_cmd_index.?].segment;
const end = start + padToIdeal(size);
for (seg.sections.items) |section| {
const increased_size = padToIdeal(section.size);
const test_end = section.offset + increased_size;
if (end > section.offset and start < test_end) {
return test_end;
}
}
return null;
}
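/// Finds a file offset within the __DWARF segment where `object_size` bytes at the given
/// alignment fit without overlapping any existing section.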
pub fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) u64 {
const seg = self.load_commands.items[self.dwarf_segment_cmd_index.?].segment;
var offset: u64 = seg.inner.fileoff;
while (self.detectAllocCollision(offset, object_size)) |item_end| {
offset = mem.alignForwardGeneric(u64, item_end, min_alignment);
}
return offset;
}
pub fn flushModule(self: *DebugSymbols, allocator: Allocator, options: link.Options) !void {
// TODO This linker code currently assumes there is only 1 compilation unit and it corresponds to the
// Zig source code.
const module = options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
if (self.debug_abbrev_section_dirty) {
try self.dwarf.writeDbgAbbrev(&self.base.base);
self.load_commands_dirty = true;
self.debug_abbrev_section_dirty = false;
}
if (self.debug_info_header_dirty) {
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].segment;
const text_section = text_segment.sections.items[self.text_section_index.?];
const low_pc = text_section.addr;
const high_pc = text_section.addr + text_section.size;
try self.dwarf.writeDbgInfoHeader(&self.base.base, module, low_pc, high_pc);
self.debug_info_header_dirty = false;
}
if (self.debug_aranges_section_dirty) {
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].segment;
const text_section = text_segment.sections.items[self.text_section_index.?];
try self.dwarf.writeDbgAranges(&self.base.base, text_section.addr, text_section.size);
self.load_commands_dirty = true;
self.debug_aranges_section_dirty = false;
}
if (self.debug_line_header_dirty) {
try self.dwarf.writeDbgLineHeader(&self.base.base, module);
self.debug_line_header_dirty = false;
}
{
const dwarf_segment = &self.load_commands.items[self.dwarf_segment_cmd_index.?].segment;
const debug_strtab_sect = &dwarf_segment.sections.items[self.debug_str_section_index.?];
if (self.debug_string_table_dirty or self.dwarf.strtab.items.len != debug_strtab_sect.size) {
const allocated_size = self.allocatedSize(debug_strtab_sect.offset);
const needed_size = self.dwarf.strtab.items.len;
if (needed_size > allocated_size) {
debug_strtab_sect.size = 0; // free the space
const new_offset = self.findFreeSpace(needed_size, 1);
debug_strtab_sect.addr = dwarf_segment.inner.vmaddr + new_offset - dwarf_segment.inner.fileoff;
debug_strtab_sect.offset = @intCast(u32, new_offset);
}
debug_strtab_sect.size = @intCast(u32, needed_size);
log.debug("__debug_strtab start=0x{x} end=0x{x}", .{
debug_strtab_sect.offset,
debug_strtab_sect.offset + needed_size,
});
try self.file.pwriteAll(self.dwarf.strtab.items, debug_strtab_sect.offset);
self.load_commands_dirty = true;
self.debug_string_table_dirty = false;
}
}
self.updateDwarfSegment();
try self.writeLinkeditSegment();
try self.updateVirtualMemoryMapping();
try self.writeLoadCommands(allocator);
try self.writeHeader();
assert(!self.load_commands_dirty);
assert(!self.debug_abbrev_section_dirty);
assert(!self.debug_aranges_section_dirty);
assert(!self.debug_string_table_dirty);
}
pub fn deinit(self: *DebugSymbols, allocator: Allocator) void {
for (self.load_commands.items) |*lc| {
lc.deinit(allocator);
}
self.load_commands.deinit(allocator);
self.dwarf.deinit();
self.file.close();
}
fn copySegmentCommand(
self: *DebugSymbols,
allocator: Allocator,
base_cmd: macho.SegmentCommand,
) !macho.SegmentCommand {
var cmd = macho.SegmentCommand{
.inner = .{
.segname = undefined,
.cmdsize = base_cmd.inner.cmdsize,
.vmaddr = base_cmd.inner.vmaddr,
.vmsize = base_cmd.inner.vmsize,
.maxprot = base_cmd.inner.maxprot,
.initprot = base_cmd.inner.initprot,
.nsects = base_cmd.inner.nsects,
.flags = base_cmd.inner.flags,
},
};
mem.copy(u8, &cmd.inner.segname, &base_cmd.inner.segname);
try cmd.sections.ensureTotalCapacity(allocator, cmd.inner.nsects);
for (base_cmd.sections.items) |base_sect, i| {
var sect = macho.section_64{
.sectname = undefined,
.segname = undefined,
.addr = base_sect.addr,
.size = base_sect.size,
.offset = 0,
.@"align" = base_sect.@"align",
.reloff = 0,
.nreloc = 0,
.flags = base_sect.flags,
.reserved1 = base_sect.reserved1,
.reserved2 = base_sect.reserved2,
.reserved3 = base_sect.reserved3,
};
mem.copy(u8, §.sectname, &base_sect.sectname);
mem.copy(u8, §.segname, &base_sect.segname);
if (self.base.text_section_index.? == i) {
self.text_section_index = @intCast(u16, i);
}
cmd.sections.appendAssumeCapacity(sect);
}
return cmd;
}
fn updateDwarfSegment(self: *DebugSymbols) void {
const dwarf_segment = &self.load_commands.items[self.dwarf_segment_cmd_index.?].segment;
var max_offset: u64 = 0;
for (dwarf_segment.sections.items) |sect| {
log.debug(" {s},{s} - 0x{x}-0x{x} - 0x{x}-0x{x}", .{
sect.segName(),
sect.sectName(),
sect.offset,
sect.offset + sect.size,
sect.addr,
sect.addr + sect.size,
});
if (sect.offset + sect.size > max_offset) {
max_offset = sect.offset + sect.size;
}
}
const file_size = max_offset - dwarf_segment.inner.fileoff;
log.debug("__DWARF size 0x{x}", .{file_size});
if (file_size != dwarf_segment.inner.filesize) {
dwarf_segment.inner.filesize = file_size;
if (dwarf_segment.inner.vmsize < dwarf_segment.inner.filesize) {
dwarf_segment.inner.vmsize = mem.alignForwardGeneric(u64, dwarf_segment.inner.filesize, self.base.page_size);
}
self.load_commands_dirty = true;
}
}
/// Writes all load commands and section headers.
fn writeLoadCommands(self: *DebugSymbols, allocator: Allocator) !void {
if (!self.load_commands_dirty) return;
var sizeofcmds: u32 = 0;
for (self.load_commands.items) |lc| {
sizeofcmds += lc.cmdsize();
}
var buffer = try allocator.alloc(u8, sizeofcmds);
defer allocator.free(buffer);
var writer = std.io.fixedBufferStream(buffer).writer();
for (self.load_commands.items) |lc| {
try lc.write(writer);
}
const off = @sizeOf(macho.mach_header_64);
log.debug("writing {} load commands from 0x{x} to 0x{x}", .{ self.load_commands.items.len, off, off + sizeofcmds });
try self.file.pwriteAll(buffer, off);
self.load_commands_dirty = false;
}
fn writeHeader(self: *DebugSymbols) !void {
var header: macho.mach_header_64 = .{};
header.filetype = macho.MH_DSYM;
switch (self.base.base.options.target.cpu.arch) {
.aarch64 => {
header.cputype = macho.CPU_TYPE_ARM64;
header.cpusubtype = macho.CPU_SUBTYPE_ARM_ALL;
},
.x86_64 => {
header.cputype = macho.CPU_TYPE_X86_64;
header.cpusubtype = macho.CPU_SUBTYPE_X86_64_ALL;
},
else => return error.UnsupportedCpuArchitecture,
}
header.ncmds = @intCast(u32, self.load_commands.items.len);
header.sizeofcmds = 0;
for (self.load_commands.items) |cmd| {
header.sizeofcmds += cmd.cmdsize();
}
log.debug("writing Mach-O header {}", .{header});
try self.file.pwriteAll(mem.asBytes(&header), 0);
}
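/// Returns how many bytes are free at `start` before the next __DWARF section begins.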
pub fn allocatedSize(self: *DebugSymbols, start: u64) u64 {
const seg = self.load_commands.items[self.dwarf_segment_cmd_index.?].segment;
assert(start >= seg.inner.fileoff);
var min_pos: u64 = std.math.maxInt(u64);
for (seg.sections.items) |section| {
if (section.offset <= start) continue;
if (section.offset < min_pos) min_pos = section.offset;
}
return min_pos - start;
}
fn updateVirtualMemoryMapping(self: *DebugSymbols) !void {
const macho_file = self.base;
const allocator = macho_file.base.allocator;
const IndexTuple = std.meta.Tuple(&[_]type{ *?u16, *?u16 });
const indices = &[_]IndexTuple{
.{ &macho_file.text_segment_cmd_index, &self.text_segment_cmd_index },
.{ &macho_file.data_const_segment_cmd_index, &self.data_const_segment_cmd_index },
.{ &macho_file.data_segment_cmd_index, &self.data_segment_cmd_index },
};
for (indices) |tuple| {
const orig_cmd = macho_file.load_commands.items[tuple[0].*.?].segment;
const cmd = try self.copySegmentCommand(allocator, orig_cmd);
const comp_cmd = &self.load_commands.items[tuple[1].*.?];
comp_cmd.deinit(allocator);
self.load_commands.items[tuple[1].*.?] = .{ .segment = cmd };
}
// TODO should we set the linkedit vmsize to that of the binary?
const orig_cmd = macho_file.load_commands.items[macho_file.linkedit_segment_cmd_index.?].segment;
const orig_vmaddr = orig_cmd.inner.vmaddr;
const linkedit_cmd = &self.load_commands.items[self.linkedit_segment_cmd_index.?].segment;
linkedit_cmd.inner.vmaddr = orig_vmaddr;
// Update VM address for the DWARF segment and sections including re-running relocations.
// TODO re-run relocations
const dwarf_cmd = &self.load_commands.items[self.dwarf_segment_cmd_index.?].segment;
const new_start_aligned = orig_vmaddr + linkedit_cmd.inner.vmsize;
const old_start_aligned = dwarf_cmd.inner.vmaddr;
const diff = new_start_aligned - old_start_aligned;
if (diff > 0) {
dwarf_cmd.inner.vmaddr = new_start_aligned;
for (dwarf_cmd.sections.items) |*sect| {
sect.addr += (new_start_aligned - old_start_aligned);
}
}
self.load_commands_dirty = true;
}
fn writeLinkeditSegment(self: *DebugSymbols) !void {
const tracy = trace(@src());
defer tracy.end();
try self.writeSymbolTable();
try self.writeStringTable();
const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].segment;
const aligned_size = mem.alignForwardGeneric(u64, seg.inner.filesize, self.base.page_size);
seg.inner.filesize = aligned_size;
seg.inner.vmsize = aligned_size;
}
fn writeSymbolTable(self: *DebugSymbols) !void {
const tracy = trace(@src());
defer tracy.end();
const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].segment;
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].symtab;
symtab.symoff = @intCast(u32, seg.inner.fileoff);
var locals = std.ArrayList(macho.nlist_64).init(self.base.base.allocator);
defer locals.deinit();
for (self.base.locals.items) |sym| {
if (sym.n_strx == 0) continue;
if (self.base.symbol_resolver.get(sym.n_strx)) |_| continue;
try locals.append(sym);
}
const nlocals = locals.items.len;
const nexports = self.base.globals.items.len;
const locals_off = symtab.symoff;
const locals_size = nlocals * @sizeOf(macho.nlist_64);
const exports_off = locals_off + locals_size;
const exports_size = nexports * @sizeOf(macho.nlist_64);
symtab.nsyms = @intCast(u32, nlocals + nexports);
const needed_size = (nlocals + nexports) * @sizeOf(macho.nlist_64);
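// The symbol table no longer fits in __LINKEDIT: grow the segment and slide the whole
// __DWARF segment (and its sections) forward by the same amount.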
if (needed_size > seg.inner.filesize) {
const aligned_size = mem.alignForwardGeneric(u64, needed_size, self.base.page_size);
const diff = @intCast(u32, aligned_size - seg.inner.filesize);
const dwarf_seg = &self.load_commands.items[self.dwarf_segment_cmd_index.?].segment;
seg.inner.filesize = aligned_size;
try MachO.copyRangeAllOverlappingAlloc(
self.base.base.allocator,
self.file,
dwarf_seg.inner.fileoff,
dwarf_seg.inner.fileoff + diff,
try math.cast(usize, dwarf_seg.inner.filesize),
);
const old_seg_fileoff = dwarf_seg.inner.fileoff;
dwarf_seg.inner.fileoff += diff;
log.debug(" (moving __DWARF segment from 0x{x} to 0x{x})", .{ old_seg_fileoff, dwarf_seg.inner.fileoff });
for (dwarf_seg.sections.items) |*sect| {
const old_offset = sect.offset;
sect.offset += diff;
log.debug(" (moving {s},{s} from 0x{x} to 0x{x})", .{
sect.segName(),
sect.sectName(),
old_offset,
sect.offset,
});
}
}
log.debug("writing local symbols from 0x{x} to 0x{x}", .{ locals_off, locals_size + locals_off });
try self.file.pwriteAll(mem.sliceAsBytes(locals.items), locals_off);
log.debug("writing exported symbols from 0x{x} to 0x{x}", .{ exports_off, exports_size + exports_off });
try self.file.pwriteAll(mem.sliceAsBytes(self.base.globals.items), exports_off);
self.load_commands_dirty = true;
}
fn writeStringTable(self: *DebugSymbols) !void {
const tracy = trace(@src());
defer tracy.end();
const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].segment;
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].symtab;
const symtab_size = @intCast(u32, symtab.nsyms * @sizeOf(macho.nlist_64));
symtab.stroff = symtab.symoff + symtab_size;
const needed_size = mem.alignForwardGeneric(u64, self.base.strtab.items.len, @alignOf(u64));
symtab.strsize = @intCast(u32, needed_size);
if (symtab_size + needed_size > seg.inner.filesize) {
const aligned_size = mem.alignForwardGeneric(u64, symtab_size + needed_size, self.base.page_size);
const diff = @intCast(u32, aligned_size - seg.inner.filesize);
const dwarf_seg = &self.load_commands.items[self.dwarf_segment_cmd_index.?].segment;
seg.inner.filesize = aligned_size;
try MachO.copyRangeAllOverlappingAlloc(
self.base.base.allocator,
self.file,
dwarf_seg.inner.fileoff,
dwarf_seg.inner.fileoff + diff,
try math.cast(usize, dwarf_seg.inner.filesize),
);
const old_seg_fileoff = dwarf_seg.inner.fileoff;
dwarf_seg.inner.fileoff += diff;
log.debug(" (moving __DWARF segment from 0x{x} to 0x{x})", .{ old_seg_fileoff, dwarf_seg.inner.fileoff });
for (dwarf_seg.sections.items) |*sect| {
const old_offset = sect.offset;
sect.offset += diff;
log.debug(" (moving {s},{s} from 0x{x} to 0x{x})", .{
sect.segName(),
sect.sectName(),
old_offset,
sect.offset,
});
}
}
log.debug("writing string table from 0x{x} to 0x{x}", .{ symtab.stroff, symtab.stroff + symtab.strsize });
try self.file.pwriteAll(self.base.strtab.items, symtab.stroff);
self.load_commands_dirty = true;
}
pub fn updateDeclLineNumber(self: *DebugSymbols, module: *Module, decl: *const Module.Decl) !void {
_ = module;
return self.dwarf.updateDeclLineNumber(&self.base.base, decl);
}
/// Caller owns the returned memory.
pub fn initDeclDebugInfo(self: *DebugSymbols, module: *Module, decl: *Module.Decl) !Dwarf.DeclDebugBuffers {
_ = module;
return self.dwarf.initDeclDebugInfo(decl);
}
pub fn commitDeclDebugInfo(
self: *DebugSymbols,
module: *Module,
decl: *Module.Decl,
debug_buffers: *Dwarf.DeclDebugBuffers,
) !void {
const symbol = self.base.locals.items[decl.link.macho.local_sym_index];
const atom = &decl.link.macho;
return self.dwarf.commitDeclDebugInfo(&self.base.base, module, decl, symbol.n_value, atom.size, debug_buffers);
} | src/link/MachO/DebugSymbols.zig |
const std = @import("std");
const Execution = @import("execution.zig");
pub const Meta = struct {
code: std.wasm.Opcode,
func_name: []const u8,
arg_kind: Arg.Kind,
push: ?Stack.Change,
pop: []const Stack.Change,
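/// Strips the leading "0xNN " opcode prefix from the declaration name.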
pub fn name(self: Meta) []const u8 {
return self.func_name[5..];
}
pub const sparse = sparse: {
@setEvalBranchQuota(10000);
const decls = publicFunctions(Impl);
var result: [decls.len]Meta = undefined;
for (decls) |decl, i| {
const args = @typeInfo(decl.data.Fn.fn_type).Fn.args;
const ctx_type = args[0].arg_type.?;
const arg_type = args[1].arg_type.?;
const pop_type = args[2].arg_type.?;
if (@typeInfo(pop_type) != .Pointer) @compileError("Pop must be a pointer: " ++ @typeName(pop_type));
const pop_ref_type = std.meta.Child(pop_type);
const return_type = decl.data.Fn.return_type;
const push_type = switch (@typeInfo(return_type)) {
.ErrorUnion => |eu_info| blk: {
for (std.meta.fields(eu_info.error_set)) |err| {
if (!errContains(WasmTrap, err.name)) {
@compileError("Unhandleable error: " ++ err.name);
}
}
break :blk eu_info.payload;
},
else => return_type,
};
result[i] = .{
.code = parseOpcode(decl.name) catch @compileError("Not a known hex: " ++ decl.name[0..4]),
.func_name = decl.name,
.arg_kind = Arg.Kind.init(arg_type),
.push = Stack.Change.initPush(push_type),
.pop = switch (pop_ref_type) {
Fixval.Void => &[0]Stack.Change{},
else => switch (@typeInfo(pop_ref_type)) {
.Union => &[1]Stack.Change{Stack.Change.initPop(pop_ref_type)},
.Struct => |s_info| blk: {
var pop_changes: [s_info.fields.len]Stack.Change = undefined;
for (s_info.fields) |field, f| {
pop_changes[f] = Stack.Change.initPop(field.field_type);
}
break :blk &pop_changes;
},
else => @compileError("Unsupported pop type: " ++ @typeName(pop_type)),
},
},
};
}
break :sparse result;
};
pub fn of(code: std.wasm.Opcode) Meta {
return all[@enumToInt(code)].?;
}
pub const all = blk: {
var result = [_]?Meta{null} ** 256;
for (sparse) |meta| {
const raw_code = @enumToInt(meta.code);
if (result[raw_code] != null) {
var buf: [100]u8 = undefined;
@compileError(std.fmt.bufPrint(&buf, "Collision: '0x{X} {}'", .{ raw_code, meta.name() }) catch unreachable);
}
result[raw_code] = meta;
}
break :blk result;
};
};
/// Generic memory chunk capable of representing any wasm type.
/// Useful for storing stack variables, locals, and globals.
pub const Fixval = extern union {
I32: i32,
U32: u32,
I64: i64,
U64: u64,
F32: f32,
F64: f64,
V128: i128, // TODO: make this a real vector
pub fn format(self: Fixval, comptime fmt: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void {
try writer.print("Fixval(0x{x})", .{@bitCast(u128, self)});
}
pub const Void = extern struct {
_pad: u128,
};
const I32 = extern union {
data: i32,
_pad: u128,
};
const U32 = extern union {
data: u32,
_pad: u128,
};
const I64 = extern union {
data: i64,
_pad: u128,
};
const U64 = extern union {
data: u64,
_pad: u128,
};
const F32 = extern union {
data: f32,
_pad: u128,
};
const F64 = extern union {
data: f64,
_pad: u128,
};
};
test "Fixval subtype sizes" {
inline for (std.meta.declarations(Fixval)) |decl| {
if (decl.data == .Type) {
try std.testing.expectEqual(@sizeOf(Fixval), @sizeOf(decl.data.Type));
}
}
}
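// A minimal sketch (editor's addition): extern unions reinterpret the same
// backing bytes, so a value written via one field reads back bit-for-bit
// through another field of the same width.
test "Fixval extern union aliasing" {
const val = Fixval{ .U32 = 0xFFFFFFFF };
try std.testing.expectEqual(@as(i32, -1), val.I32);
}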
pub const Arg = extern union {
I32: i32,
U32: u32,
I64: i64,
U64: u64,
F32: f32,
F64: f64,
Type: Type,
U32z: U32z,
Mem: Mem,
Array: Array,
pub fn format(self: Arg, comptime fmt: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void {
try writer.print("Arg(0x{x})", .{@bitCast(u128, self)});
}
pub const Kind = enum {
Void,
I32,
U32,
I64,
U64,
F32,
F64,
Type,
U32z,
Mem,
Array,
fn init(comptime T: type) Kind {
return switch (T) {
Fixval.Void => .Void,
Fixval.I32 => .I32,
Fixval.U32 => .U32,
Fixval.I64 => .I64,
Fixval.U64 => .U64,
Fixval.F32 => .F32,
Fixval.F64 => .F64,
Type => .Type,
U32z => .U32z,
Mem => .Mem,
Array => .Array,
else => @compileError("Unsupported arg type: " ++ @typeName(T)),
};
}
};
pub const Type = enum(u128) {
Void = 0x40,
I32 = 0x7F,
I64 = 0x7E,
F32 = 0x7D,
F64 = 0x7C,
};
pub const U32z = extern struct {
data: u32,
reserved: u8,
// Zig bug -- won't pack correctly without manually splitting this
_pad0: u8 = 0,
_pad1: u16 = 0,
_pad2: u64 = 0,
};
pub const Mem = extern struct {
offset: u32,
align_: u32,
_pad: u64 = 0,
};
// TODO: make this extern
pub const Array = packed struct {
ptr: [*]u32,
len: usize,
_pad: std.meta.Int(.unsigned, 128 - 2 * @bitSizeOf(usize)) = 0,
};
};
pub const Stack = struct {
pub const Change = enum {
I32,
I64,
F32,
F64,
Poly,
fn initPush(comptime T: type) ?Change {
return switch (T) {
void => null,
i32, u32 => Change.I32,
i64, u64 => Change.I64,
f32 => Change.F32,
f64 => Change.F64,
Fixval => Change.Poly,
else => @compileError("Unsupported type: " ++ @typeName(T)),
};
}
fn initPop(comptime T: type) Change {
return switch (T) {
Fixval.I32, Fixval.U32 => .I32,
Fixval.I64, Fixval.U64 => .I64,
Fixval.F32 => .F32,
Fixval.F64 => .F64,
Fixval => .Poly,
else => @compileError("Unsupported type: " ++ @typeName(T)),
};
}
};
};
fn errContains(comptime err_set: type, comptime name: []const u8) bool {
std.debug.assert(@typeInfo(err_set) == .ErrorSet);
for (std.meta.fields(err_set)) |err| {
if (std.mem.eql(u8, err.name, name)) {
return true;
}
}
return false;
}
fn publicFunctions(comptime T: type) []std.builtin.TypeInfo.Declaration {
const decls = std.meta.declarations(T);
var result: [decls.len]std.builtin.TypeInfo.Declaration = undefined;
var cursor: usize = 0;
for (decls) |decl| {
if (decl.is_pub and decl.data == .Fn) {
result[cursor] = decl;
cursor += 1;
}
}
return result[0..cursor];
}
test "ops" {
const nop = Meta.of(.nop);
try std.testing.expectEqual(nop.arg_kind, .Void);
try std.testing.expectEqual(nop.push, null);
try std.testing.expectEqual(nop.pop.len, 0);
const i32_load = Meta.of(.i32_load);
try std.testing.expectEqual(i32_load.arg_kind, .Mem);
try std.testing.expectEqual(i32_load.push, .I32);
try std.testing.expectEqual(i32_load.pop.len, 1);
try std.testing.expectEqual(i32_load.pop[0], .I32);
const select = Meta.of(.select);
try std.testing.expectEqual(select.arg_kind, .Void);
try std.testing.expectEqual(select.push, .Poly);
try std.testing.expectEqual(select.pop.len, 3);
try std.testing.expectEqual(select.pop[0], .Poly);
try std.testing.expectEqual(select.pop[1], .Poly);
try std.testing.expectEqual(select.pop[2], .I32);
}
pub const WasmTrap = error{
Unreachable,
Overflow,
OutOfBounds,
DivisionByZero,
InvalidConversionToInteger,
IndirectCalleeAbsent,
IndirectCallTypeMismatch,
};
pub fn step(op: std.wasm.Opcode, ctx: *Execution, arg: Arg, pop: [*]Fixval) WasmTrap!?Fixval {
// TODO: test out function pointers for performance comparison
// LLVM optimizes this inline for / mem.eql into a jump table.
// Please benchmark if we try to optimize this.
inline for (Meta.sparse) |meta| {
if (meta.code == op) {
return stepName(meta.func_name, ctx, arg, pop);
}
}
unreachable; // Op parse error
}
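/// Comptime-resolves the handler named `func_name` in `Impl`, reinterprets the
/// raw `Arg` and operand pointer into the handler's parameter types, and boxes
/// the result back into an optional `Fixval` (null for void handlers).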
pub inline fn stepName(comptime func_name: []const u8, ctx: *Execution, arg: Arg, pop: [*]Fixval) WasmTrap!?Fixval {
const func = @field(Impl, func_name);
const args = @typeInfo(@TypeOf(func)).Fn.args;
const result = func(
ctx,
switch (args[1].arg_type.?) {
Arg.Type => arg.Type,
else => @bitCast(args[1].arg_type.?, arg),
},
@ptrCast(args[2].arg_type.?, pop),
);
const result_value = if (@typeInfo(@TypeOf(result)) == .ErrorUnion) try result else result;
return switch (@TypeOf(result_value)) {
void => null,
i32 => Fixval{ .I32 = result_value },
u32 => Fixval{ .U32 = result_value },
i64 => Fixval{ .I64 = result_value },
u64 => Fixval{ .U64 = result_value },
f32 => Fixval{ .F32 = result_value },
f64 => Fixval{ .F64 = result_value },
Fixval => result_value,
else => @compileError("Op return unimplemented: " ++ @typeName(@TypeOf(result_value))),
};
}
fn parseOpcode(name: []const u8) !std.wasm.Opcode {
if (name[0] != '0' or name[1] != 'x' or name[4] != ' ') {
return error.InvalidCharacter;
}
return @intToEnum(std.wasm.Opcode, try std.fmt.parseInt(u8, name[2..4], 16));
}
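// A minimal sketch (editor's addition) exercising the "0xNN name" naming
// convention that `Impl`'s declarations follow.
test "parseOpcode" {
try std.testing.expectEqual(std.wasm.Opcode.nop, try parseOpcode("0x01 nop"));
try std.testing.expectError(error.InvalidCharacter, parseOpcode("0x01_nop"));
}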
const Impl = struct {
const Void = Fixval.Void;
const I32 = Fixval.I32;
const I64 = Fixval.I64;
const U32 = Fixval.U32;
const U64 = Fixval.U64;
const F32 = Fixval.F32;
const F64 = Fixval.F64;
// TODO: replace once Zig can define tuple types
fn Pair(comptime T0: type, comptime T1: type) type {
return extern struct {
_0: T0,
_1: T1,
};
}
// TODO: replace once Zig can define tuple types
fn Triple(comptime T0: type, comptime T1: type, comptime T2: type) type {
return extern struct {
_0: T0,
_1: T1,
_2: T2,
};
}
pub fn @"0x00 unreachable"(ctx: *Execution, arg: Void, pop: *Void) !void {
return error.Unreachable;
}
pub fn @"0x01 nop"(ctx: *Execution, arg: Void, pop: *Void) void {}
pub fn @"0x02 block"(ctx: *Execution, arg: Arg.Type, pop: *Void) void {
// noop, setup metadata only
}
pub fn @"0x03 loop"(ctx: *Execution, arg: Arg.Type, pop: *Void) void {
// noop, setup metadata only
}
pub fn @"0x04 if"(ctx: *Execution, arg: Arg.Type, pop: *I32) void {
if (pop.data == 0) {
ctx.jump(null);
}
}
pub fn @"0x05 else"(ctx: *Execution, arg: Void, pop: *Void) void {
// If we are executing this instruction, it means the `if` block was executed, so we should skip until the end
ctx.jump(null);
}
pub fn @"0x0B end"(ctx: *Execution, arg: Void, pop: *Void) void {
// noop, setup metadata only
// Technically this can return the top value from the stack,
// but it would be immediately pushed on
}
pub fn @"0x0C br"(ctx: *Execution, arg: U32, pop: *Void) void {
ctx.jump(null);
}
pub fn @"0x0D br_if"(ctx: *Execution, arg: U32, pop: *I32) void {
if (pop.data != 0) {
ctx.jump(null);
}
}
pub fn @"0x0E br_table"(ctx: *Execution, arg: Arg.Array, pop: *U32) void {
const idx = std.math.min(pop.data, arg.len - 1); // default to last item. Pretty handy!
ctx.jump(arg.ptr[idx]);
}
pub fn @"0x0F return"(ctx: *Execution, arg: Void, pop: *Void) void {
// Forces unwindCall()
ctx.current_frame.instr = std.math.maxInt(u32);
}
pub fn @"0x10 call"(ctx: *Execution, arg: U32, pop: *Void) !void {
try ctx.initCall(arg.data);
}
pub fn @"0x11 call_indirect"(ctx: *Execution, arg: Arg.U32z, pop: *U32) !void {
const func_id = pop.data;
if (func_id >= ctx.funcs.len) {
return error.IndirectCalleeAbsent;
}
const func = ctx.funcs[func_id];
if (func.func_type != arg.data) {
return error.IndirectCallTypeMismatch;
}
try ctx.initCall(func_id);
}
pub fn @"0x1A drop"(ctx: *Execution, arg: Void, pop: *Fixval) void {
// Do nothing with the popped value
}
pub fn @"0x1B select"(ctx: *Execution, arg: Void, pop: *Triple(Fixval, Fixval, I32)) Fixval {
return if (pop._2.data != 0) pop._0 else pop._1;
}
pub fn @"0x20 local.get"(ctx: *Execution, arg: U32, pop: *Void) Fixval {
return ctx.getLocal(arg.data);
}
pub fn @"0x21 local.set"(ctx: *Execution, arg: U32, pop: *Fixval) void {
ctx.setLocal(arg.data, pop.*);
}
pub fn @"0x22 local.tee"(ctx: *Execution, arg: U32, pop: *Fixval) Fixval {
ctx.setLocal(arg.data, pop.*);
return pop.*;
}
pub fn @"0x23 global.get"(ctx: *Execution, arg: U32, pop: *Void) Fixval {
return ctx.getGlobal(arg.data);
}
pub fn @"0x24 global.set"(ctx: *Execution, arg: U32, pop: *Fixval) void {
ctx.setGlobal(arg.data, pop.*);
}
pub fn @"0x28 i32.load"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i32 {
return try ctx.memory.load(i32, pop.data, mem.offset);
}
pub fn @"0x29 i64.load"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 {
return try ctx.memory.load(i64, pop.data, mem.offset);
}
pub fn @"0x2A f32.load"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !f32 {
return try ctx.memory.load(f32, pop.data, mem.offset);
}
pub fn @"0x2B f64.load"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !f64 {
return try ctx.memory.load(f64, pop.data, mem.offset);
}
pub fn @"0x2C i32.load8_s"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i32 {
return try ctx.memory.load(i8, pop.data, mem.offset);
}
pub fn @"0x2D i32.load8_u"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !u32 {
return try ctx.memory.load(u8, pop.data, mem.offset);
}
pub fn @"0x2E i32.load16_s"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i32 {
return try ctx.memory.load(i16, pop.data, mem.offset);
}
pub fn @"0x2F i32.load16_u"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !u32 {
return try ctx.memory.load(u16, pop.data, mem.offset);
}
pub fn @"0x30 i64.load8_s"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 {
return try ctx.memory.load(i8, pop.data, mem.offset);
}
pub fn @"0x31 i64.load8_u"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 {
return try ctx.memory.load(u8, pop.data, mem.offset);
}
pub fn @"0x32 i64.load16_s"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 {
return try ctx.memory.load(i16, pop.data, mem.offset);
}
pub fn @"0x33 i64.load16_u"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 {
return try ctx.memory.load(u16, pop.data, mem.offset);
}
pub fn @"0x34 i64.load32_s"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 {
return try ctx.memory.load(i32, pop.data, mem.offset);
}
pub fn @"0x35 i64.load32_u"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 {
return try ctx.memory.load(u32, pop.data, mem.offset);
}
pub fn @"0x36 i32.store"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, I32)) !void {
return try ctx.memory.store(i32, pop._0.data, mem.offset, pop._1.data);
}
pub fn @"0x37 i64.store"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, I64)) !void {
return try ctx.memory.store(i64, pop._0.data, mem.offset, pop._1.data);
}
pub fn @"0x38 f32.store"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, F32)) !void {
return try ctx.memory.store(f32, pop._0.data, mem.offset, pop._1.data);
}
pub fn @"0x39 f64.store"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, F64)) !void {
return try ctx.memory.store(f64, pop._0.data, mem.offset, pop._1.data);
}
pub fn @"0x3A i32.store8"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, I32)) !void {
return try ctx.memory.store(i8, pop._0.data, mem.offset, @truncate(i8, pop._1.data));
}
pub fn @"0x3B i32.store16"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, I32)) !void {
return try ctx.memory.store(i16, pop._0.data, mem.offset, @truncate(i16, pop._1.data));
}
pub fn @"0x3C i64.store8"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, I64)) !void {
return try ctx.memory.store(i8, pop._0.data, mem.offset, @truncate(i8, pop._1.data));
}
pub fn @"0x3D i64.store16"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, I64)) !void {
return try ctx.memory.store(i16, pop._0.data, mem.offset, @truncate(i16, pop._1.data));
}
pub fn @"0x3E i64.store32"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, I64)) !void {
return try ctx.memory.store(i32, pop._0.data, mem.offset, @truncate(i32, pop._1.data));
}
pub fn @"0x3F memory.size"(ctx: *Execution, arg: Void, pop: *Void) u32 {
return ctx.memory.pageCount();
}
pub fn @"0x40 memory.grow"(ctx: *Execution, arg: Void, pop: *U32) i32 {
ctx.memory.grow(@intCast(u16, pop.data)) catch |err| switch (err) {
error.OutOfMemory => return @as(i32, -1),
};
return ctx.memory.pageCount();
}
pub fn @"0x41 i32.const"(ctx: *Execution, arg: I32, pop: *Void) i32 {
return arg.data;
}
pub fn @"0x42 i64.const"(ctx: *Execution, arg: I64, pop: *Void) i64 {
return arg.data;
}
pub fn @"0x43 f32.const"(ctx: *Execution, arg: F32, pop: *Void) f32 {
return arg.data;
}
pub fn @"0x44 f64.const"(ctx: *Execution, arg: F64, pop: *Void) f64 {
return arg.data;
}
pub fn @"0x45 i32.eqz"(ctx: *Execution, arg: Void, pop: *I32) i32 {
return @boolToInt(pop.data == 0);
}
pub fn @"0x46 i32.eq"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 {
return @boolToInt(pop._0.data == pop._1.data);
}
pub fn @"0x47 i32.ne"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 {
return @boolToInt(pop._0.data != pop._1.data);
}
pub fn @"0x48 i32.lt_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 {
return @boolToInt(pop._0.data < pop._1.data);
}
pub fn @"0x49 i32.lt_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) i32 {
return @boolToInt(pop._0.data < pop._1.data);
}
pub fn @"0x4A i32.gt_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 {
return @boolToInt(pop._0.data > pop._1.data);
}
pub fn @"0x4B i32.gt_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) i32 {
return @boolToInt(pop._0.data > pop._1.data);
}
pub fn @"0x4C i32.le_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 {
return @boolToInt(pop._0.data <= pop._1.data);
}
pub fn @"0x4D i32.le_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) i32 {
return @boolToInt(pop._0.data <= pop._1.data);
}
pub fn @"0x4E i32.ge_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 {
return @boolToInt(pop._0.data >= pop._1.data);
}
pub fn @"0x4F i32.ge_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) i32 {
return @boolToInt(pop._0.data >= pop._1.data);
}
pub fn @"0x50 i64.eqz"(ctx: *Execution, arg: Void, pop: *I64) i32 {
return @boolToInt(pop.data == 0);
}
pub fn @"0x51 i64.eq"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i32 {
return @boolToInt(pop._0.data == pop._1.data);
}
pub fn @"0x52 i64.ne"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i32 {
return @boolToInt(pop._0.data != pop._1.data);
}
pub fn @"0x53 i64.lt_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i32 {
return @boolToInt(pop._0.data < pop._1.data);
}
pub fn @"0x54 i64.lt_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) i32 {
return @boolToInt(pop._0.data < pop._1.data);
}
pub fn @"0x55 i64.gt_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i32 {
return @boolToInt(pop._0.data > pop._1.data);
}
pub fn @"0x56 i64.gt_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) i32 {
return @boolToInt(pop._0.data > pop._1.data);
}
pub fn @"0x57 i64.le_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i32 {
return @boolToInt(pop._0.data <= pop._1.data);
}
pub fn @"0x58 i64.le_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) i32 {
return @boolToInt(pop._0.data <= pop._1.data);
}
pub fn @"0x59 i64.ge_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i32 {
return @boolToInt(pop._0.data >= pop._1.data);
}
pub fn @"0x5A i64.ge_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) i32 {
return @boolToInt(pop._0.data >= pop._1.data);
}
pub fn @"0x5B f32.eq"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) i32 {
return @boolToInt(pop._0.data == pop._1.data);
}
pub fn @"0x5C f32.ne"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) i32 {
return @boolToInt(pop._0.data != pop._1.data);
}
pub fn @"0x5D f32.lt"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) i32 {
return @boolToInt(pop._0.data < pop._1.data);
}
pub fn @"0x5E f32.gt"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) i32 {
return @boolToInt(pop._0.data > pop._1.data);
}
pub fn @"0x5F f32.le"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) i32 {
return @boolToInt(pop._0.data <= pop._1.data);
}
pub fn @"0x60 f32.ge"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) i32 {
return @boolToInt(pop._0.data >= pop._1.data);
}
pub fn @"0x61 f64.eq"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) i32 {
return @boolToInt(pop._0.data == pop._1.data);
}
pub fn @"0x62 f64.ne"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) i32 {
return @boolToInt(pop._0.data != pop._1.data);
}
pub fn @"0x63 f64.lt"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) i32 {
return @boolToInt(pop._0.data < pop._1.data);
}
pub fn @"0x64 f64.gt"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) i32 {
return @boolToInt(pop._0.data > pop._1.data);
}
pub fn @"0x65 f64.le"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) i32 {
return @boolToInt(pop._0.data <= pop._1.data);
}
pub fn @"0x66 f64.ge"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) i32 {
return @boolToInt(pop._0.data >= pop._1.data);
}
pub fn @"0x67 i32.clz"(ctx: *Execution, arg: Void, pop: *I32) i32 {
return @clz(i32, pop.data);
}
pub fn @"0x68 i32.ctz"(ctx: *Execution, arg: Void, pop: *I32) i32 {
return @ctz(i32, pop.data);
}
pub fn @"0x69 i32.popcnt"(ctx: *Execution, arg: Void, pop: *I32) i32 {
return @popCount(i32, pop.data);
}
pub fn @"0x6A i32.add"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 {
return pop._0.data +% pop._1.data;
}
pub fn @"0x6B i32.sub"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 {
return pop._0.data -% pop._1.data;
}
pub fn @"0x6C i32.mul"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 {
return pop._0.data *% pop._1.data;
}
pub fn @"0x6D i32.div_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) !i32 {
if (pop._1.data == 0) return error.DivisionByZero;
if (pop._0.data == std.math.minInt(i32) and pop._1.data == -1) return error.Overflow;
return @divTrunc(pop._0.data, pop._1.data);
}
pub fn @"0x6E i32.div_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) !u32 {
if (pop._1.data == 0) return error.DivisionByZero;
return @divFloor(pop._0.data, pop._1.data);
}
pub fn @"0x6F i32.rem_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) !i32 {
if (pop._1.data == 0) return error.DivisionByZero;
const abs_0 = std.math.absCast(pop._0.data);
const abs_1 = std.math.absCast(pop._1.data);
const val = @intCast(i32, @rem(abs_0, abs_1));
return if (pop._0.data < 0) -val else val;
}
pub fn @"0x70 i32.rem_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) !u32 {
if (pop._1.data == 0) return error.DivisionByZero;
return @mod(pop._0.data, pop._1.data);
}
pub fn @"0x71 i32.and"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 {
return pop._0.data & pop._1.data;
}
pub fn @"0x72 i32.or"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 {
return pop._0.data | pop._1.data;
}
pub fn @"0x73 i32.xor"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 {
return pop._0.data ^ pop._1.data;
}
pub fn @"0x74 i32.shl"(ctx: *Execution, arg: Void, pop: *Pair(I32, U32)) i32 {
return pop._0.data << @truncate(u5, pop._1.data);
}
pub fn @"0x75 i32.shr_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, U32)) i32 {
return pop._0.data >> @truncate(u5, pop._1.data);
}
pub fn @"0x76 i32.shr_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) u32 {
return pop._0.data >> @truncate(u5, pop._1.data);
}
pub fn @"0x77 i32.rotl"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) u32 {
return std.math.rotl(u32, pop._0.data, @truncate(u6, pop._1.data));
}
pub fn @"0x78 i32.rotr"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) u32 {
return std.math.rotr(u32, pop._0.data, @truncate(u6, pop._1.data));
}
pub fn @"0x79 i64.clz"(ctx: *Execution, arg: Void, pop: *I64) i64 {
return @clz(i64, pop.data);
}
pub fn @"0x7A i64.ctz"(ctx: *Execution, arg: Void, pop: *I64) i64 {
return @ctz(i64, pop.data);
}
pub fn @"0x7B i64.popcnt"(ctx: *Execution, arg: Void, pop: *I64) i64 {
return @popCount(i64, pop.data);
}
pub fn @"0x7C i64.add"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i64 {
return pop._0.data +% pop._1.data;
}
pub fn @"0x7D i64.sub"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i64 {
return pop._0.data -% pop._1.data;
}
pub fn @"0x7E i64.mul"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i64 {
return pop._0.data *% pop._1.data;
}
pub fn @"0x7F i64.div_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) !i64 {
if (pop._1.data == 0) return error.DivisionByZero;
if (pop._0.data == std.math.minInt(i64) and pop._1.data == -1) return error.Overflow;
return @divTrunc(pop._0.data, pop._1.data);
}
pub fn @"0x80 i64.div_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) !u64 {
if (pop._1.data == 0) return error.DivisionByZero;
return @divFloor(pop._0.data, pop._1.data);
}
pub fn @"0x81 i64.rem_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) !i64 {
if (pop._1.data == 0) return error.DivisionByZero;
const abs_0 = std.math.absCast(pop._0.data);
const abs_1 = std.math.absCast(pop._1.data);
const val = @intCast(i64, @rem(abs_0, abs_1));
return if (pop._0.data < 0) -val else val;
}
pub fn @"0x82 i64.rem_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) !u64 {
if (pop._1.data == 0) return error.DivisionByZero;
return @mod(pop._0.data, pop._1.data);
}
pub fn @"0x83 i64.and"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i64 {
return pop._0.data & pop._1.data;
}
pub fn @"0x84 i64.or"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i64 {
return pop._0.data | pop._1.data;
}
pub fn @"0x85 i64.xor"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i64 {
return pop._0.data ^ pop._1.data;
}
pub fn @"0x86 i64.shl"(ctx: *Execution, arg: Void, pop: *Pair(I64, U64)) i64 {
return pop._0.data << @truncate(u6, pop._1.data);
}
pub fn @"0x87 i64.shr_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, U64)) i64 {
return pop._0.data >> @truncate(u6, pop._1.data);
}
pub fn @"0x88 i64.shr_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) u64 {
return pop._0.data >> @truncate(u6, pop._1.data);
}
pub fn @"0x89 i64.rotl"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) u64 {
return std.math.rotl(u64, pop._0.data, @truncate(u7, pop._1.data));
}
pub fn @"0x8A i64.rotr"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) u64 {
return std.math.rotr(u64, pop._0.data, @truncate(u7, pop._1.data));
}
pub fn @"0x8B f32.abs"(ctx: *Execution, arg: Void, pop: *F32) f32 {
return @fabs(pop.data);
}
pub fn @"0x8C f32.neg"(ctx: *Execution, arg: Void, pop: *F32) f32 {
return -pop.data;
}
pub fn @"0x8D f32.ceil"(ctx: *Execution, arg: Void, pop: *F32) f32 {
return @ceil(pop.data);
}
pub fn @"0x8E f32.floor"(ctx: *Execution, arg: Void, pop: *F32) f32 {
return @floor(pop.data);
}
pub fn @"0x8F f32.trunc"(ctx: *Execution, arg: Void, pop: *F32) f32 {
return @trunc(pop.data);
}
pub fn @"0x90 f32.nearest"(ctx: *Execution, arg: Void, pop: *F32) f32 {
return @round(pop.data);
}
pub fn @"0x91 f32.sqrt"(ctx: *Execution, arg: Void, pop: *F32) f32 {
return @sqrt(pop.data);
}
pub fn @"0x92 f32.add"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 {
return pop._0.data + pop._1.data;
}
pub fn @"0x93 f32.sub"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 {
return pop._0.data - pop._1.data;
}
pub fn @"0x94 f32.mul"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 {
return pop._0.data * pop._1.data;
}
pub fn @"0x95 f32.div"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 {
return pop._0.data / pop._1.data;
}
pub fn @"0x96 f32.min"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 {
return std.math.min(pop._0.data, pop._1.data);
}
pub fn @"0x97 f32.max"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 {
return std.math.max(pop._0.data, pop._1.data);
}
pub fn @"0x98 f32.copysign"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 {
return std.math.copysign(f32, pop._0.data, pop._1.data);
}
pub fn @"0x99 f64.abs"(ctx: *Execution, arg: Void, pop: *F64) f64 {
return @fabs(pop.data);
}
pub fn @"0x9A f64.neg"(ctx: *Execution, arg: Void, pop: *F64) f64 {
return -pop.data;
}
pub fn @"0x9B f64.ceil"(ctx: *Execution, arg: Void, pop: *F64) f64 {
return @ceil(pop.data);
}
pub fn @"0x9C f64.floor"(ctx: *Execution, arg: Void, pop: *F64) f64 {
return @floor(pop.data);
}
pub fn @"0x9D f64.trunc"(ctx: *Execution, arg: Void, pop: *F64) f64 {
return @trunc(pop.data);
}
pub fn @"0x9E f64.nearest"(ctx: *Execution, arg: Void, pop: *F64) f64 {
return @round(pop.data);
}
pub fn @"0x9F f64.sqrt"(ctx: *Execution, arg: Void, pop: *F64) f64 {
return @sqrt(pop.data);
}
pub fn @"0xA0 f64.add"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 {
return pop._0.data + pop._1.data;
}
pub fn @"0xA1 f64.sub"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 {
return pop._0.data - pop._1.data;
}
pub fn @"0xA2 f64.mul"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 {
return pop._0.data * pop._1.data;
}
pub fn @"0xA3 f64.div"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 {
return pop._0.data / pop._1.data;
}
pub fn @"0xA4 f64.min"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 {
return std.math.min(pop._0.data, pop._1.data);
}
pub fn @"0xA5 f64.max"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 {
return std.math.max(pop._0.data, pop._1.data);
}
pub fn @"0xA6 f64.copysign"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 {
return std.math.copysign(f64, pop._0.data, pop._1.data);
}
pub fn @"0xA7 i32.wrap_i64"(ctx: *Execution, arg: Void, pop: *U64) u32 {
return @truncate(u32, pop.data);
}
pub fn @"0xA8 i32.trunc_f32_s"(ctx: *Execution, arg: Void, pop: *F32) !i32 {
return floatToInt(i32, f32, pop.data);
}
pub fn @"0xA9 i32.trunc_f32_u"(ctx: *Execution, arg: Void, pop: *F32) !u32 {
return floatToInt(u32, f32, pop.data);
}
pub fn @"0xAA i32.trunc_f64_s"(ctx: *Execution, arg: Void, pop: *F64) !i32 {
return floatToInt(i32, f64, pop.data);
}
pub fn @"0xAB i32.trunc_f64_u"(ctx: *Execution, arg: Void, pop: *F64) !u32 {
return floatToInt(u32, f64, pop.data);
}
pub fn @"0xAC i64.extend_i32_s"(ctx: *Execution, arg: Void, pop: *I64) i64 {
return pop.data;
}
pub fn @"0xAD i64.extend_i32_u"(ctx: *Execution, arg: Void, pop: *U32) u64 {
return pop.data;
}
pub fn @"0xAE i64.trunc_f32_s"(ctx: *Execution, arg: Void, pop: *F32) !i64 {
return floatToInt(i64, f32, pop.data);
}
pub fn @"0xAF i64.trunc_f32_u"(ctx: *Execution, arg: Void, pop: *F32) !u64 {
return floatToInt(u64, f32, pop.data);
}
pub fn @"0xB0 i64.trunc_f64_s"(ctx: *Execution, arg: Void, pop: *F64) !i64 {
return floatToInt(i64, f64, pop.data);
}
pub fn @"0xB1 i64.trunc_f64_u"(ctx: *Execution, arg: Void, pop: *F64) !u64 {
return floatToInt(u64, f64, pop.data);
}
pub fn @"0xB2 f32.convert_i32_s"(ctx: *Execution, arg: Void, pop: *I32) f32 {
return @intToFloat(f32, pop.data);
}
pub fn @"0xB3 f32.convert_i32_u"(ctx: *Execution, arg: Void, pop: *U32) f32 {
return @intToFloat(f32, pop.data);
}
pub fn @"0xB4 f32.convert_i64_s"(ctx: *Execution, arg: Void, pop: *I64) f32 {
return @intToFloat(f32, pop.data);
}
pub fn @"0xB5 f32.convert_i64_u"(ctx: *Execution, arg: Void, pop: *U64) f32 {
return @intToFloat(f32, pop.data);
}
pub fn @"0xB6 f32.demote_f64"(ctx: *Execution, arg: Void, pop: *F64) f32 {
return @floatCast(f32, pop.data);
}
pub fn @"0xB7 f64.convert_i32_s"(ctx: *Execution, arg: Void, pop: *I32) f64 {
return @intToFloat(f64, pop.data);
}
pub fn @"0xB8 f64.convert_i32_u"(ctx: *Execution, arg: Void, pop: *U32) f64 {
return @intToFloat(f64, pop.data);
}
pub fn @"0xB9 f64.convert_i64_s"(ctx: *Execution, arg: Void, pop: *I64) f64 {
return @intToFloat(f64, pop.data);
}
pub fn @"0xBA f64.convert_i64_u"(ctx: *Execution, arg: Void, pop: *U64) f64 {
return @intToFloat(f64, pop.data);
}
pub fn @"0xBB f64.promote_f32"(ctx: *Execution, arg: Void, pop: *F32) f64 {
return @floatCast(f64, pop.data);
}
pub fn @"0xBC i32.reinterpret_f32"(ctx: *Execution, arg: Void, pop: *F32) i32 {
return @bitCast(i32, pop.data);
}
pub fn @"0xBD i64.reinterpret_f64"(ctx: *Execution, arg: Void, pop: *F64) i64 {
return @bitCast(i64, pop.data);
}
pub fn @"0xBE f32.reinterpret_i32"(ctx: *Execution, arg: Void, pop: *I32) f32 {
return @bitCast(f32, pop.data);
}
pub fn @"0xBF f64.reinterpret_i64"(ctx: *Execution, arg: Void, pop: *I64) f64 {
return @bitCast(f64, pop.data);
}
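/// Shared trap check for the *.trunc_* conversions: wasm requires a trap
/// rather than undefined behavior for NaN, infinity, and out-of-range values.
/// Note (editor's caveat): the range check compares in the float domain, so
/// values within rounding error of the integer bounds may still slip through.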
fn floatToInt(comptime Dst: type, comptime Src: type, val: Src) !Dst {
if (!std.math.isFinite(val) or val > std.math.maxInt(Dst) or val < std.math.minInt(Dst)) {
return error.InvalidConversionToInteger;
}
return @floatToInt(Dst, val);
}
}; | src/op.zig |
const builtin = @import("builtin");
const std = @import("std.zig");
const os = std.os;
const mem = std.mem;
const base64 = std.base64;
const crypto = std.crypto;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
pub const path = @import("fs/path.zig");
pub const File = @import("fs/file.zig").File;
pub const symLink = os.symlink;
pub const symLinkC = os.symlinkC;
pub const deleteFile = os.unlink;
pub const deleteFileC = os.unlinkC;
pub const rename = os.rename;
pub const renameC = os.renameC;
pub const renameW = os.renameW;
pub const realpath = os.realpath;
pub const realpathC = os.realpathC;
pub const realpathW = os.realpathW;
pub const getAppDataDir = @import("fs/get_app_data_dir.zig").getAppDataDir;
pub const GetAppDataDirError = @import("fs/get_app_data_dir.zig").GetAppDataDirError;
/// This represents the maximum size of a UTF-8 encoded file path.
/// All file system operations which return a path are guaranteed to
/// fit into a UTF-8 encoded array of this length.
/// The byte count includes room for a null sentinel byte.
pub const MAX_PATH_BYTES = switch (builtin.os) {
.linux, .macosx, .ios, .freebsd, .netbsd => os.PATH_MAX,
// Each UTF-16LE character may be expanded to 3 UTF-8 bytes.
// If it would require 4 UTF-8 bytes, then there would be a surrogate
// pair in the UTF-16LE, and we (over)account 3 bytes for it that way.
// +1 for the null byte at the end, which can be encoded in 1 byte.
.windows => os.windows.PATH_MAX_WIDE * 3 + 1,
else => @compileError("Unsupported OS"),
};
// here we replace the standard +/ with -_ so that it can be used in a file name
const b64_fs_encoder = base64.Base64Encoder.init("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_", base64.standard_pad_char);
/// TODO remove the allocator requirement from this API
pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
if (symLink(existing_path, new_path)) {
return;
} else |err| switch (err) {
error.PathAlreadyExists => {},
else => return err, // TODO zig should know this set does not include PathAlreadyExists
}
const dirname = path.dirname(new_path) orelse ".";
var rand_buf: [12]u8 = undefined;
const tmp_path = try allocator.alloc(u8, dirname.len + 1 + base64.Base64Encoder.calcSize(rand_buf.len));
defer allocator.free(tmp_path);
mem.copy(u8, tmp_path[0..], dirname);
tmp_path[dirname.len] = path.sep;
while (true) {
try crypto.randomBytes(rand_buf[0..]);
b64_fs_encoder.encode(tmp_path[dirname.len + 1 ..], rand_buf);
if (symLink(existing_path, tmp_path)) {
return rename(tmp_path, new_path);
} else |err| switch (err) {
error.PathAlreadyExists => continue,
else => return err, // TODO zig should know this set does not include PathAlreadyExists
}
}
}
// TODO fix enum literal not casting to error union
const PrevStatus = enum {
stale,
fresh,
};
pub fn updateFile(source_path: []const u8, dest_path: []const u8) !PrevStatus {
return updateFileMode(source_path, dest_path, null);
}
/// Check the file size, mtime, and mode of `source_path` and `dest_path`. If they are equal, does nothing.
/// Otherwise, atomically copies `source_path` to `dest_path`. The destination file gains the mtime,
/// atime, and mode of the source file so that the next call to `updateFile` will not need a copy.
/// Returns the previous status of the file before updating.
/// If any of the directories do not exist for dest_path, they are created.
/// TODO https://github.com/ziglang/zig/issues/2885
pub fn updateFileMode(source_path: []const u8, dest_path: []const u8, mode: ?File.Mode) !PrevStatus {
var src_file = try File.open(source_path, File.READ);
defer src_file.close();
const src_stat = try src_file.stat();
check_dest_stat: {
const dest_stat = blk: {
var dest_file = File.open(dest_path, File.READ) catch |err| switch (err) {
error.FileNotFound => break :check_dest_stat,
else => |e| return e,
};
defer dest_file.close();
break :blk try dest_file.stat();
};
if (src_stat.size == dest_stat.size and
src_stat.mtime == dest_stat.mtime and
src_stat.mode == dest_stat.mode)
{
return PrevStatus.fresh;
}
}
const actual_mode = mode orelse src_stat.mode;
// TODO this logic could be made more efficient by calling makePath, once
// that API does not require an allocator
var atomic_file = make_atomic_file: while (true) {
const af = AtomicFile.init(dest_path, actual_mode) catch |err| switch (err) {
error.FileNotFound => {
var p = dest_path;
while (path.dirname(p)) |dirname| {
makeDir(dirname) catch |e| switch (e) {
error.FileNotFound => {
p = dirname;
continue;
},
else => return e,
};
continue :make_atomic_file;
} else {
return err;
}
},
else => |e| return e,
};
break af;
} else unreachable;
defer atomic_file.deinit();
const in_stream = &src_file.inStream().stream;
var buf: [mem.page_size * 6]u8 = undefined;
while (true) {
const amt = try in_stream.readFull(buf[0..]);
try atomic_file.file.write(buf[0..amt]);
if (amt != buf.len) {
try atomic_file.file.updateTimes(src_stat.atime, src_stat.mtime);
try atomic_file.finish();
return PrevStatus.stale;
}
}
}
/// Guaranteed to be atomic. However until https://patchwork.kernel.org/patch/9636735/ is
/// merged and readily available,
/// there is a possibility of power loss or application termination leaving temporary files present
/// in the same directory as dest_path.
/// Destination file will have the same mode as the source file.
pub fn copyFile(source_path: []const u8, dest_path: []const u8) !void {
var in_file = try File.open(source_path, File.READ);
defer in_file.close();
const mode = try in_file.mode();
const in_stream = &in_file.inStream().stream;
var atomic_file = try AtomicFile.init(dest_path, mode);
defer atomic_file.deinit();
var buf: [mem.page_size]u8 = undefined;
while (true) {
const amt = try in_stream.readFull(buf[0..]);
try atomic_file.file.write(buf[0..amt]);
if (amt != buf.len) {
return atomic_file.finish();
}
}
}
/// Guaranteed to be atomic. However until https://patchwork.kernel.org/patch/9636735/ is
/// merged and readily available,
/// there is a possibility of power loss or application termination leaving temporary files present
/// in the same directory as dest_path.
pub fn copyFileMode(source_path: []const u8, dest_path: []const u8, mode: File.Mode) !void {
var in_file = try File.openRead(source_path);
defer in_file.close();
var atomic_file = try AtomicFile.init(dest_path, mode);
defer atomic_file.deinit();
var buf: [mem.page_size * 6]u8 = undefined;
while (true) {
const amt = try in_file.read(buf[0..]);
try atomic_file.file.write(buf[0..amt]);
if (amt != buf.len) {
return atomic_file.finish();
}
}
}
pub const AtomicFile = struct {
file: File,
tmp_path_buf: [MAX_PATH_BYTES]u8,
dest_path: []const u8,
finished: bool,
const InitError = File.OpenError;
/// dest_path must remain valid for the lifetime of AtomicFile
/// call finish to atomically replace dest_path with contents
/// TODO once we have null terminated pointers, use the
/// openWriteNoClobberN function
pub fn init(dest_path: []const u8, mode: File.Mode) InitError!AtomicFile {
const dirname = path.dirname(dest_path);
var rand_buf: [12]u8 = undefined;
const dirname_component_len = if (dirname) |d| d.len + 1 else 0;
const encoded_rand_len = comptime base64.Base64Encoder.calcSize(rand_buf.len);
const tmp_path_len = dirname_component_len + encoded_rand_len;
var tmp_path_buf: [MAX_PATH_BYTES]u8 = undefined;
if (tmp_path_len >= tmp_path_buf.len) return error.NameTooLong;
if (dirname) |dir| {
mem.copy(u8, tmp_path_buf[0..], dir);
tmp_path_buf[dir.len] = path.sep;
}
tmp_path_buf[tmp_path_len] = 0;
while (true) {
try crypto.randomBytes(rand_buf[0..]);
b64_fs_encoder.encode(tmp_path_buf[dirname_component_len..tmp_path_len], rand_buf);
const file = File.openWriteNoClobberC(&tmp_path_buf, mode) catch |err| switch (err) {
error.PathAlreadyExists => continue,
// TODO zig should figure out that this error set does not include PathAlreadyExists since
// it is handled in the above switch
else => return err,
};
return AtomicFile{
.file = file,
.tmp_path_buf = tmp_path_buf,
.dest_path = dest_path,
.finished = false,
};
}
}
/// always call deinit, even after successful finish()
pub fn deinit(self: *AtomicFile) void {
if (!self.finished) {
self.file.close();
deleteFileC(&self.tmp_path_buf) catch {};
self.finished = true;
}
}
pub fn finish(self: *AtomicFile) !void {
assert(!self.finished);
self.file.close();
self.finished = true;
if (os.windows.is_the_target) {
const dest_path_w = try os.windows.sliceToPrefixedFileW(self.dest_path);
const tmp_path_w = try os.windows.cStrToPrefixedFileW(&self.tmp_path_buf);
return os.renameW(&tmp_path_w, &dest_path_w);
}
const dest_path_c = try os.toPosixPath(self.dest_path);
return os.renameC(&self.tmp_path_buf, &dest_path_c);
}
};
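// Typical usage (editor's sketch; the 0o666 mode below is an illustrative
// assumption, not an API constant):
//   var af = try AtomicFile.init("out.bin", 0o666);
//   defer af.deinit();
//   try af.file.write(contents);
//   try af.finish(); // atomically renames the temp file over "out.bin"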
const default_new_dir_mode = 0o755;
/// Create a new directory.
pub fn makeDir(dir_path: []const u8) !void {
return os.mkdir(dir_path, default_new_dir_mode);
}
/// Same as `makeDir` except the parameter is a null-terminated UTF8-encoded string.
pub fn makeDirC(dir_path: [*]const u8) !void {
return os.mkdirC(dir_path, default_new_dir_mode);
}
/// Same as `makeDir` except the parameter is a null-terminated UTF16LE-encoded string.
pub fn makeDirW(dir_path: [*]const u16) !void {
return os.mkdirW(dir_path, default_new_dir_mode);
}
/// Calls makeDir recursively to make an entire path. Returns success if the path
/// already exists and is a directory.
/// This function is not atomic, and if it returns an error, the file system may
/// have been modified regardless.
/// TODO determine if we can remove the allocator requirement from this function
pub fn makePath(allocator: *Allocator, full_path: []const u8) !void {
const resolved_path = try path.resolve(allocator, [_][]const u8{full_path});
defer allocator.free(resolved_path);
var end_index: usize = resolved_path.len;
while (true) {
makeDir(resolved_path[0..end_index]) catch |err| switch (err) {
error.PathAlreadyExists => {
// TODO stat the file and return an error if it's not a directory
// this is important because otherwise a dangling symlink
// could cause an infinite loop
if (end_index == resolved_path.len) return;
},
error.FileNotFound => {
// march end_index backward until next path component
while (true) {
end_index -= 1;
if (path.isSep(resolved_path[end_index])) break;
}
continue;
},
else => return err,
};
if (end_index == resolved_path.len) return;
// march end_index forward until next path component
while (true) {
end_index += 1;
if (end_index == resolved_path.len or path.isSep(resolved_path[end_index])) break;
}
}
}
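// Example (editor's sketch): makePath(allocator, "a/b/c") first tries
// mkdir("a/b/c"); each FileNotFound walks end_index back one component
// ("a/b", then "a"), and each success walks it forward again, so only the
// missing suffix of the chain is created.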
/// Returns `error.DirNotEmpty` if the directory is not empty.
/// To delete a directory recursively, see `deleteTree`.
pub fn deleteDir(dir_path: []const u8) !void {
return os.rmdir(dir_path);
}
/// Same as `deleteDir` except the parameter is a null-terminated UTF8-encoded string.
pub fn deleteDirC(dir_path: [*]const u8) !void {
return os.rmdirC(dir_path);
}
/// Same as `deleteDir` except the parameter is a null-terminated UTF16LE-encoded string.
pub fn deleteDirW(dir_path: [*]const u16) !void {
return os.rmdirW(dir_path);
}
const DeleteTreeError = error{
OutOfMemory,
AccessDenied,
FileTooBig,
IsDir,
SymLinkLoop,
ProcessFdQuotaExceeded,
NameTooLong,
SystemFdQuotaExceeded,
NoDevice,
SystemResources,
NoSpaceLeft,
PathAlreadyExists,
ReadOnlyFileSystem,
NotDir,
FileNotFound,
FileSystem,
FileBusy,
DirNotEmpty,
DeviceBusy,
/// On Windows, file paths must be valid Unicode.
InvalidUtf8,
/// On Windows, file paths cannot contain these characters:
/// '/', '*', '?', '"', '<', '>', '|'
BadPathName,
Unexpected,
};
/// Whether `full_path` describes a symlink, file, or directory, this function
/// removes it. If it cannot be removed because it is a non-empty directory,
/// this function recursively removes its entries and then tries again.
/// TODO determine if we can remove the allocator requirement
/// https://github.com/ziglang/zig/issues/2886
pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!void {
start_over: while (true) {
var got_access_denied = false;
// First, try deleting the item as a file. This way we don't follow sym links.
if (deleteFile(full_path)) {
return;
} else |err| switch (err) {
error.FileNotFound => return,
error.IsDir => {},
error.AccessDenied => got_access_denied = true,
error.InvalidUtf8,
error.SymLinkLoop,
error.NameTooLong,
error.SystemResources,
error.ReadOnlyFileSystem,
error.NotDir,
error.FileSystem,
error.FileBusy,
error.BadPathName,
error.Unexpected,
=> return err,
}
{
var dir = Dir.open(allocator, full_path) catch |err| switch (err) {
error.NotDir => {
if (got_access_denied) {
return error.AccessDenied;
}
continue :start_over;
},
error.OutOfMemory,
error.AccessDenied,
error.FileTooBig,
error.IsDir,
error.SymLinkLoop,
error.ProcessFdQuotaExceeded,
error.NameTooLong,
error.SystemFdQuotaExceeded,
error.NoDevice,
error.FileNotFound,
error.SystemResources,
error.NoSpaceLeft,
error.PathAlreadyExists,
error.Unexpected,
error.InvalidUtf8,
error.BadPathName,
error.DeviceBusy,
=> return err,
};
defer dir.close();
var full_entry_buf = std.ArrayList(u8).init(allocator);
defer full_entry_buf.deinit();
while (try dir.next()) |entry| {
try full_entry_buf.resize(full_path.len + entry.name.len + 1);
const full_entry_path = full_entry_buf.toSlice();
mem.copy(u8, full_entry_path, full_path);
full_entry_path[full_path.len] = path.sep;
mem.copy(u8, full_entry_path[full_path.len + 1 ..], entry.name);
try deleteTree(allocator, full_entry_path);
}
}
return deleteDir(full_path);
}
}
/// TODO: split this API in two: one that opens directory handles (from which
/// files can then be opened), and one that iterates entries of an open directory handle.
pub const Dir = struct {
handle: Handle,
allocator: *Allocator,
pub const Handle = switch (builtin.os) {
.macosx, .ios, .freebsd, .netbsd => struct {
fd: i32,
seek: i64,
buf: []u8,
index: usize,
end_index: usize,
},
.linux => struct {
fd: i32,
buf: []u8,
index: usize,
end_index: usize,
},
.windows => struct {
handle: os.windows.HANDLE,
find_file_data: os.windows.WIN32_FIND_DATAW,
first: bool,
name_data: [256]u8,
},
else => @compileError("unimplemented"),
};
pub const Entry = struct {
name: []const u8,
kind: Kind,
pub const Kind = enum {
BlockDevice,
CharacterDevice,
Directory,
NamedPipe,
SymLink,
File,
UnixDomainSocket,
Whiteout,
Unknown,
};
};
pub const OpenError = error{
FileNotFound,
NotDir,
AccessDenied,
FileTooBig,
IsDir,
SymLinkLoop,
ProcessFdQuotaExceeded,
NameTooLong,
SystemFdQuotaExceeded,
NoDevice,
SystemResources,
NoSpaceLeft,
PathAlreadyExists,
OutOfMemory,
InvalidUtf8,
BadPathName,
DeviceBusy,
Unexpected,
};
/// Call close when done.
/// TODO remove the allocator requirement from this API
/// https://github.com/ziglang/zig/issues/2885
pub fn open(allocator: *Allocator, dir_path: []const u8) OpenError!Dir {
return Dir{
.allocator = allocator,
.handle = switch (builtin.os) {
.windows => blk: {
var find_file_data: os.windows.WIN32_FIND_DATAW = undefined;
const handle = try os.windows.FindFirstFile(dir_path, &find_file_data);
break :blk Handle{
.handle = handle,
.find_file_data = find_file_data, // TODO guaranteed copy elision
.first = true,
.name_data = undefined,
};
},
.macosx, .ios, .freebsd, .netbsd => Handle{
.fd = try os.open(dir_path, os.O_RDONLY | os.O_NONBLOCK | os.O_DIRECTORY | os.O_CLOEXEC, 0),
.seek = 0,
.index = 0,
.end_index = 0,
.buf = [_]u8{},
},
.linux => Handle{
.fd = try os.open(dir_path, os.O_RDONLY | os.O_DIRECTORY | os.O_CLOEXEC, 0),
.index = 0,
.end_index = 0,
.buf = [_]u8{},
},
else => @compileError("unimplemented"),
},
};
}
pub fn close(self: *Dir) void {
if (os.windows.is_the_target) {
return os.windows.FindClose(self.handle.handle);
}
self.allocator.free(self.handle.buf);
os.close(self.handle.fd);
}
/// Memory such as file names referenced in this returned entry becomes invalid
/// with subsequent calls to next, as well as when this `Dir` is deinitialized.
pub fn next(self: *Dir) !?Entry {
switch (builtin.os) {
.linux => return self.nextLinux(),
.macosx, .ios => return self.nextDarwin(),
.windows => return self.nextWindows(),
.freebsd => return self.nextBsd(),
.netbsd => return self.nextBsd(),
else => @compileError("unimplemented"),
}
}
pub fn openRead(self: Dir, file_path: []const u8) os.OpenError!File {
const path_c = try os.toPosixPath(file_path);
return self.openReadC(&path_c);
}
pub fn openReadC(self: Dir, file_path: [*]const u8) OpenError!File {
const flags = os.O_LARGEFILE | os.O_RDONLY;
const fd = try os.openatC(self.handle.fd, file_path, flags, 0);
return File.openHandle(fd);
}
fn nextDarwin(self: *Dir) !?Entry {
start_over: while (true) {
if (self.handle.index >= self.handle.end_index) {
if (self.handle.buf.len == 0) {
self.handle.buf = try self.allocator.alloc(u8, mem.page_size);
}
while (true) {
const rc = os.system.__getdirentries64(
self.handle.fd,
self.handle.buf.ptr,
self.handle.buf.len,
&self.handle.seek,
);
if (rc == 0) return null;
if (rc < 0) {
switch (os.errno(rc)) {
os.EBADF => unreachable,
os.EFAULT => unreachable,
os.ENOTDIR => unreachable,
os.EINVAL => {
self.handle.buf = try self.allocator.realloc(self.handle.buf, self.handle.buf.len * 2);
continue;
},
else => |err| return os.unexpectedErrno(err),
}
}
self.handle.index = 0;
self.handle.end_index = @intCast(usize, rc);
break;
}
}
const darwin_entry = @ptrCast(*align(1) os.dirent, &self.handle.buf[self.handle.index]);
const next_index = self.handle.index + darwin_entry.d_reclen;
self.handle.index = next_index;
const name = @ptrCast([*]u8, &darwin_entry.d_name)[0..darwin_entry.d_namlen];
if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) {
continue :start_over;
}
const entry_kind = switch (darwin_entry.d_type) {
os.DT_BLK => Entry.Kind.BlockDevice,
os.DT_CHR => Entry.Kind.CharacterDevice,
os.DT_DIR => Entry.Kind.Directory,
os.DT_FIFO => Entry.Kind.NamedPipe,
os.DT_LNK => Entry.Kind.SymLink,
os.DT_REG => Entry.Kind.File,
os.DT_SOCK => Entry.Kind.UnixDomainSocket,
os.DT_WHT => Entry.Kind.Whiteout,
else => Entry.Kind.Unknown,
};
return Entry{
.name = name,
.kind = entry_kind,
};
}
}
fn nextWindows(self: *Dir) !?Entry {
while (true) {
if (self.handle.first) {
self.handle.first = false;
} else {
if (!try os.windows.FindNextFile(self.handle.handle, &self.handle.find_file_data))
return null;
}
const name_utf16le = mem.toSlice(u16, self.handle.find_file_data.cFileName[0..].ptr);
if (mem.eql(u16, name_utf16le, [_]u16{'.'}) or mem.eql(u16, name_utf16le, [_]u16{ '.', '.' }))
continue;
// Trust that Windows gives us valid UTF-16LE
const name_utf8_len = std.unicode.utf16leToUtf8(self.handle.name_data[0..], name_utf16le) catch unreachable;
const name_utf8 = self.handle.name_data[0..name_utf8_len];
const kind = blk: {
const attrs = self.handle.find_file_data.dwFileAttributes;
if (attrs & os.windows.FILE_ATTRIBUTE_DIRECTORY != 0) break :blk Entry.Kind.Directory;
if (attrs & os.windows.FILE_ATTRIBUTE_REPARSE_POINT != 0) break :blk Entry.Kind.SymLink;
break :blk Entry.Kind.File;
};
return Entry{
.name = name_utf8,
.kind = kind,
};
}
}
fn nextLinux(self: *Dir) !?Entry {
start_over: while (true) {
if (self.handle.index >= self.handle.end_index) {
if (self.handle.buf.len == 0) {
self.handle.buf = try self.allocator.alloc(u8, mem.page_size);
}
while (true) {
const rc = os.linux.getdents64(self.handle.fd, self.handle.buf.ptr, self.handle.buf.len);
switch (os.linux.getErrno(rc)) {
0 => {},
os.EBADF => unreachable,
os.EFAULT => unreachable,
os.ENOTDIR => unreachable,
os.EINVAL => {
self.handle.buf = try self.allocator.realloc(self.handle.buf, self.handle.buf.len * 2);
continue;
},
else => |err| return os.unexpectedErrno(err),
}
if (rc == 0) return null;
self.handle.index = 0;
self.handle.end_index = rc;
break;
}
}
const linux_entry = @ptrCast(*align(1) os.dirent64, &self.handle.buf[self.handle.index]);
const next_index = self.handle.index + linux_entry.d_reclen;
self.handle.index = next_index;
const name = mem.toSlice(u8, @ptrCast([*]u8, &linux_entry.d_name));
// skip . and .. entries
if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) {
continue :start_over;
}
const entry_kind = switch (linux_entry.d_type) {
os.DT_BLK => Entry.Kind.BlockDevice,
os.DT_CHR => Entry.Kind.CharacterDevice,
os.DT_DIR => Entry.Kind.Directory,
os.DT_FIFO => Entry.Kind.NamedPipe,
os.DT_LNK => Entry.Kind.SymLink,
os.DT_REG => Entry.Kind.File,
os.DT_SOCK => Entry.Kind.UnixDomainSocket,
else => Entry.Kind.Unknown,
};
return Entry{
.name = name,
.kind = entry_kind,
};
}
}
fn nextBsd(self: *Dir) !?Entry {
start_over: while (true) {
if (self.handle.index >= self.handle.end_index) {
if (self.handle.buf.len == 0) {
self.handle.buf = try self.allocator.alloc(u8, mem.page_size);
}
while (true) {
const rc = os.system.getdirentries(
self.handle.fd,
self.handle.buf.ptr,
self.handle.buf.len,
&self.handle.seek,
);
switch (os.errno(rc)) {
0 => {},
os.EBADF => unreachable,
os.EFAULT => unreachable,
os.ENOTDIR => unreachable,
os.EINVAL => {
self.handle.buf = try self.allocator.realloc(self.handle.buf, self.handle.buf.len * 2);
continue;
},
else => |err| return os.unexpectedErrno(err),
}
if (rc == 0) return null;
self.handle.index = 0;
self.handle.end_index = @intCast(usize, rc);
break;
}
}
const freebsd_entry = @ptrCast(*align(1) os.dirent, &self.handle.buf[self.handle.index]);
const next_index = self.handle.index + freebsd_entry.d_reclen;
self.handle.index = next_index;
const name = @ptrCast([*]u8, &freebsd_entry.d_name)[0..freebsd_entry.d_namlen];
if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) {
continue :start_over;
}
const entry_kind = switch (freebsd_entry.d_type) {
os.DT_BLK => Entry.Kind.BlockDevice,
os.DT_CHR => Entry.Kind.CharacterDevice,
os.DT_DIR => Entry.Kind.Directory,
os.DT_FIFO => Entry.Kind.NamedPipe,
os.DT_LNK => Entry.Kind.SymLink,
os.DT_REG => Entry.Kind.File,
os.DT_SOCK => Entry.Kind.UnixDomainSocket,
os.DT_WHT => Entry.Kind.Whiteout,
else => Entry.Kind.Unknown,
};
return Entry{
.name = name,
.kind = entry_kind,
};
}
}
};
pub const Walker = struct {
stack: std.ArrayList(StackItem),
name_buffer: std.Buffer,
pub const Entry = struct {
path: []const u8,
basename: []const u8,
kind: Dir.Entry.Kind,
};
const StackItem = struct {
dir_it: Dir,
dirname_len: usize,
};
/// After each call to this function, and on deinit(), the memory returned
/// from this function becomes invalid. A copy must be made in order to keep
/// a reference to the path.
pub fn next(self: *Walker) !?Entry {
while (true) {
if (self.stack.len == 0) return null;
// `top` becomes invalid after appending to `self.stack`.
const top = &self.stack.toSlice()[self.stack.len - 1];
const dirname_len = top.dirname_len;
if (try top.dir_it.next()) |base| {
self.name_buffer.shrink(dirname_len);
try self.name_buffer.appendByte(path.sep);
try self.name_buffer.append(base.name);
if (base.kind == .Directory) {
// TODO https://github.com/ziglang/zig/issues/2888
var new_dir = try Dir.open(self.stack.allocator, self.name_buffer.toSliceConst());
{
errdefer new_dir.close();
try self.stack.append(StackItem{
.dir_it = new_dir,
.dirname_len = self.name_buffer.len(),
});
}
}
return Entry{
.basename = self.name_buffer.toSliceConst()[dirname_len + 1 ..],
.path = self.name_buffer.toSliceConst(),
.kind = base.kind,
};
} else {
self.stack.pop().dir_it.close();
}
}
}
pub fn deinit(self: *Walker) void {
while (self.stack.popOrNull()) |*item| item.dir_it.close();
self.stack.deinit();
self.name_buffer.deinit();
}
};
/// Recursively iterates over a directory.
/// Must call `Walker.deinit` when done.
/// `dir_path` must not end in a path separator.
/// TODO: https://github.com/ziglang/zig/issues/2888
pub fn walkPath(allocator: *Allocator, dir_path: []const u8) !Walker {
assert(!mem.endsWith(u8, dir_path, path.sep_str));
var dir_it = try Dir.open(allocator, dir_path);
errdefer dir_it.close();
var name_buffer = try std.Buffer.init(allocator, dir_path);
errdefer name_buffer.deinit();
var walker = Walker{
.stack = std.ArrayList(Walker.StackItem).init(allocator),
.name_buffer = name_buffer,
};
try walker.stack.append(Walker.StackItem{
.dir_it = dir_it,
.dirname_len = dir_path.len,
});
return walker;
}
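// Usage sketch (assumes this std version still exposes `std.heap.direct_allocator`):
// walk the current directory tree and print every regular file. Per the doc
// comment on `Walker.next`, each `entry.path` is invalidated by the next call.
test "walkPath usage sketch" {
    var walker = try walkPath(std.heap.direct_allocator, ".");
    defer walker.deinit();
    while (try walker.next()) |entry| {
        if (entry.kind == .File) {
            std.debug.warn("{}\n", entry.path);
        }
    }
}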
/// Read value of a symbolic link.
/// The return value is a slice of buffer, from index `0`.
/// TODO https://github.com/ziglang/zig/issues/2888
pub fn readLink(pathname: []const u8, buffer: *[os.PATH_MAX]u8) ![]u8 {
return os.readlink(pathname, buffer);
}
/// Same as `readLink`, except the `pathname` parameter is null-terminated.
/// TODO https://github.com/ziglang/zig/issues/2888
pub fn readLinkC(pathname: [*]const u8, buffer: *[os.PATH_MAX]u8) ![]u8 {
return os.readlinkC(pathname, buffer);
}
pub const OpenSelfExeError = os.OpenError || os.windows.CreateFileError || SelfExePathError;
pub fn openSelfExe() OpenSelfExeError!File {
if (os.linux.is_the_target) {
return File.openC(c"/proc/self/exe", File.READ);
}
if (os.windows.is_the_target) {
var buf: [os.windows.PATH_MAX_WIDE]u16 = undefined;
const wide_slice = try selfExePathW(&buf);
return File.openW(wide_slice.ptr, File.READ);
}
var buf: [MAX_PATH_BYTES]u8 = undefined;
const self_exe_path = try selfExePath(&buf);
buf[self_exe_path.len] = 0;
return File.openC(self_exe_path.ptr, File.READ);
}
test "openSelfExe" {
switch (builtin.os) {
.linux, .macosx, .ios, .windows, .freebsd => (try openSelfExe()).close(),
else => return error.SkipZigTest, // Unsupported OS.
}
}
pub const SelfExePathError = os.ReadLinkError || os.SysCtlError;
/// Get the path to the current executable.
/// If you only need the directory, use selfExeDirPath.
/// If you only want an open file handle, use openSelfExe.
/// This function may return an error if the current executable
/// was deleted after spawning.
/// Returned value is a slice of out_buffer.
///
/// On Linux, depends on procfs being mounted. If the currently executing binary has
/// been deleted, the file path looks something like `/a/b/c/exe (deleted)`.
/// TODO make the return type of this a null terminated pointer
pub fn selfExePath(out_buffer: *[MAX_PATH_BYTES]u8) SelfExePathError![]u8 {
if (os.darwin.is_the_target) {
var u32_len: u32 = out_buffer.len;
const rc = std.c._NSGetExecutablePath(out_buffer, &u32_len);
if (rc != 0) return error.NameTooLong;
return mem.toSlice(u8, out_buffer);
}
switch (builtin.os) {
.linux => return os.readlinkC(c"/proc/self/exe", out_buffer),
.freebsd => {
var mib = [4]c_int{ os.CTL_KERN, os.KERN_PROC, os.KERN_PROC_PATHNAME, -1 };
var out_len: usize = out_buffer.len;
try os.sysctl(&mib, out_buffer, &out_len, null, 0);
// TODO could this slice from 0 to out_len instead?
return mem.toSlice(u8, out_buffer);
},
.netbsd => {
var mib = [4]c_int{ os.CTL_KERN, os.KERN_PROC_ARGS, -1, os.KERN_PROC_PATHNAME };
var out_len: usize = out_buffer.len;
try os.sysctl(&mib, out_buffer, &out_len, null, 0);
// TODO could this slice from 0 to out_len instead?
return mem.toSlice(u8, out_buffer);
},
.windows => {
var utf16le_buf: [os.windows.PATH_MAX_WIDE]u16 = undefined;
const utf16le_slice = try selfExePathW(&utf16le_buf);
// Trust that Windows gives us valid UTF-16LE.
const end_index = std.unicode.utf16leToUtf8(out_buffer, utf16le_slice) catch unreachable;
return out_buffer[0..end_index];
},
else => @compileError("std.fs.selfExePath not supported for this target"),
}
}
/// Same as `selfExePath` except the result is UTF16LE-encoded.
pub fn selfExePathW(out_buffer: *[os.windows.PATH_MAX_WIDE]u16) SelfExePathError![]u16 {
return os.windows.GetModuleFileNameW(null, out_buffer, out_buffer.len);
}
/// `selfExeDirPath` except allocates the result on the heap.
/// Caller owns returned memory.
pub fn selfExeDirPathAlloc(allocator: *Allocator) ![]u8 {
var buf: [MAX_PATH_BYTES]u8 = undefined;
return mem.dupe(allocator, u8, try selfExeDirPath(&buf));
}
/// Get the directory path that contains the current executable.
/// Returned value is a slice of out_buffer.
pub fn selfExeDirPath(out_buffer: *[MAX_PATH_BYTES]u8) SelfExePathError![]const u8 {
if (os.linux.is_the_target) {
// If the currently executing binary has been deleted,
// the file path looks something like `/a/b/c/exe (deleted)`
// This path cannot be opened, but it's valid for determining the directory
// the executable was in when it was run.
const full_exe_path = try os.readlinkC(c"/proc/self/exe", out_buffer);
// Assume that /proc/self/exe has an absolute path, and therefore dirname
// will not return null.
return path.dirname(full_exe_path).?;
}
const self_exe_path = try selfExePath(out_buffer);
// Assume that the OS APIs return absolute paths, and therefore dirname
// will not return null.
return path.dirname(self_exe_path).?;
}
/// `realpath`, except caller must free the returned memory.
pub fn realpathAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 {
var buf: [MAX_PATH_BYTES]u8 = undefined;
return mem.dupe(allocator, u8, try os.realpath(pathname, &buf));
}
test "" {
_ = @import("fs/path.zig");
_ = @import("fs/file.zig");
_ = @import("fs/get_app_data_dir.zig");
} | std/fs.zig |
extern fn pam_start(
service_name: [*:0]const u8,
user: ?[*:0]const u8,
conversation: *const Conv,
pamh: **Handle,
) Result;
pub const start = pam_start;
pub const Handle = opaque {
extern fn pam_end(
pamh: *Handle,
/// Should be set to the result of the last pam library call.
pam_status: Result,
) Result;
pub const end = pam_end;
extern fn pam_authenticate(pamh: *Handle, flags: c_int) Result;
pub const authenticate = pam_authenticate;
extern fn pam_setcred(pamh: *Handle, flags: c_int) Result;
pub const setcred = pam_setcred;
};
pub const Message = extern struct {
msg_style: enum(c_int) {
prompt_echo_off = 1,
prompt_echo_on = 2,
error_msg = 3,
text_info = 4,
},
msg: [*:0]const u8,
};
pub const Response = extern struct {
resp: [*:0]u8,
/// From pam_conv(3):
/// "The resp_retcode member of this struct is unused and should be set to zero."
resp_retcode: c_int = 0,
};
pub const Conv = extern struct {
conv: fn (
num_msg: c_int,
/// Note: This matches the Linux-PAM API, apparently Solaris PAM differs
/// in how the msg argument is used.
msg: [*]*const Message,
        /// Out parameter; the [*]Response array will be freed using free(3)
/// by the caller.
resp: *[*]Response,
appdata_ptr: ?*anyopaque,
) callconv(.C) Result,
appdata_ptr: ?*anyopaque,
};
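// Usage sketch (illustrative, not a working login): start a transaction,
// attempt authentication, and end the transaction with the status of the
// last call. A real client must supply a conversation callback that answers
// the module's prompts; this stub simply fails the conversation. The
// "login" service name is an example value.
fn stubConv(
    num_msg: c_int,
    msg: [*]*const Message,
    resp: *[*]Response,
    appdata_ptr: ?*anyopaque,
) callconv(.C) Result {
    _ = num_msg;
    _ = msg;
    _ = resp;
    _ = appdata_ptr;
    return .conv_err;
}
fn exampleAuthenticate(user: [*:0]const u8) Result {
    const conv = Conv{ .conv = stubConv, .appdata_ptr = null };
    var handle: *Handle = undefined;
    var result = start("login", user, &conv, &handle);
    if (result != .success) return result;
    result = handle.authenticate(0);
    // Per pam_end(3), pass the status of the last pam library call.
    return handle.end(result);
}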
pub const Result = enum(c_int) {
/// Successful function return
success = 0,
/// dlopen() failure when dynamically loading a service module
open_err = 1,
/// Symbol not found
symbol_err = 2,
/// Error in service module
service_err = 3,
/// System error
system_err = 4,
/// Memory buffer error
buf_err = 5,
/// Permission denied
perm_denied = 6,
/// Authentication failure
auth_err = 7,
/// Can not access authentication data due to insufficient credentials
cred_insufficient = 8,
/// Underlying authentication service can not retrieve authentication information
authinfo_unavail = 9,
/// User not known to the underlying authentication module
user_unknown = 10,
/// An authentication service has maintained a retry count which has
/// been reached. No further retries should be attempted
maxtries = 11,
/// New authentication token required. This is normally returned if the
/// machine security policies require that the password should be changed
/// because the password is NULL or it has aged
new_authtok_reqd = 12,
/// User account has expired
acct_expired = 13,
/// Can not make/remove an entry for the specified session
session_err = 14,
    /// Underlying authentication service can not retrieve user credentials
cred_unavail = 15,
/// User credentials expired
cred_expired = 16,
/// Failure setting user credentials
cred_err = 17,
/// No module specific data is present
no_module_data = 18,
/// Conversation error
conv_err = 19,
/// Authentication token manipulation error
authtok_err = 20,
/// Authentication information cannot be recovered
authtok_recovery_err = 21,
/// Authentication token lock busy
authtok_lock_busy = 22,
/// Authentication token aging disabled
authtok_disable_aging = 23,
/// Preliminary check by password service
try_again = 24,
/// Ignore underlying account module regardless of whether the control
/// flag is required, optional, or sufficient
ignore = 25,
/// Critical error (?module fail now request)
abort = 26,
/// user's authentication token has expired
authtok_expired = 27,
/// module is not known
module_unknown = 28,
/// Bad item passed to pam_*_item()
bad_item = 29,
/// conversation function is event driven and data is not available yet
conv_again = 30,
/// please call this function again to complete authentication
/// stack. Before calling again, verify that conversation is completed
incomplete = 31,
/// The pamh argument to this function is ignored by the implementation.
extern fn pam_strerror(pamh: ?*Handle, errnum: Result) [*:0]const u8;
pub fn description(result: Result) [*:0]const u8 {
return pam_strerror(null, result);
}
};
// Flags intended to be bitwise or'ed together
pub const flags = struct {
/// Authentication service should not generate any messages
pub const silent = 0x8000;
// Note: these flags are used by pam_authenticate{,_secondary}()
/// The authentication service should return .auth_err if
/// user has a null authentication token
pub const disallow_null_authtok = 0x0001;
// Note: these flags are used for pam_setcred()
/// Set user credentials for an authentication service
    pub const establish_cred = 0x0002;
/// Delete user credentials associated with an authentication service
pub const delete_cred = 0x0004;
/// Reinitialize user credentials
pub const reinitialize_cred = 0x0008;
/// Extend lifetime of user credentials
pub const refresh_cred = 0x0010;
}; | src/pam.zig |
const assert = @import("std").debug.assert;
const math = @import("std").math;
fn radians(deg: f32) f32 {
return deg * (math.pi / 180.0);
}
pub const Vec2 = packed struct {
x: f32, y: f32,
pub fn zero() Vec2 {
return Vec2 { .x = 0.0, .y = 0.0 };
}
pub fn one() Vec2 {
return Vec2 { .x = 1.0, .y = 1.0 };
}
pub fn new(x: f32, y: f32) Vec2 {
return Vec2 { .x = x, .y = y };
}
};
pub const Vec3 = packed struct {
x: f32, y: f32, z: f32,
pub fn zero() Vec3 {
return Vec3 { .x=0.0, .y=0.0, .z=0.0 };
}
pub fn new(x: f32, y: f32, z: f32) Vec3 {
return Vec3 { .x=x, .y=y, .z=z };
}
pub fn up() Vec3 {
return Vec3 { .x=0.0, .y=1.0, .z=0.0 };
}
pub fn len(v: Vec3) f32 {
return math.sqrt(Vec3.dot(v, v));
}
pub fn add(left: Vec3, right: Vec3) Vec3 {
return Vec3 {
.x = left.x + right.x,
.y = left.y + right.y,
.z = left.z + right.z
};
}
pub fn sub(left: Vec3, right: Vec3) Vec3 {
return Vec3 {
.x = left.x - right.x,
.y = left.y - right.y,
.z = left.z - right.z
};
}
pub fn mul(v: Vec3, s: f32) Vec3 {
return Vec3 {
.x = v.x * s,
.y = v.y * s,
.z = v.z * s
};
}
pub fn norm(v: Vec3) Vec3 {
const l = Vec3.len(v);
if (l != 0.0) {
return Vec3 {
.x = v.x / l,
.y = v.y / l,
.z = v.z / l
};
}
else {
return Vec3.zero();
}
}
pub fn cross(v0: Vec3, v1: Vec3) Vec3 {
return Vec3 {
.x = (v0.y * v1.z) - (v0.z * v1.y),
.y = (v0.z * v1.x) - (v0.x * v1.z),
.z = (v0.x * v1.y) - (v0.y * v1.x)
};
}
pub fn dot(v0: Vec3, v1: Vec3) f32 {
return v0.x * v1.x + v0.y * v1.y + v0.z * v1.z;
}
};
pub const Mat3 = packed struct {
m: [3][3]f32,
pub fn identity() Mat3 {
return Mat3 {
.m = [_][3]f32 {
.{ 1.0, 0.0, 0.0 },
.{ 0.0, 1.0, 0.0 },
.{ 0.0, 0.0, 1.0 },
},
};
}
pub fn zero() Mat3 {
return Mat3 {
.m = [_][3]f32 {
.{ 0.0, 0.0, 0.0 },
.{ 0.0, 0.0, 0.0 },
.{ 0.0, 0.0, 0.0 },
},
};
}
pub fn mul(left: Mat3, right: Mat3) Mat3 {
var res = Mat3.zero();
var col: usize = 0;
while (col < 3): (col += 1) {
var row: usize = 0;
while (row < 3): (row += 1) {
res.m[col][row] = left.m[0][row] * right.m[col][0] +
left.m[1][row] * right.m[col][1] +
left.m[2][row] * right.m[col][2];
}
}
return res;
}
    pub fn rotate(angle: f32, axis_unorm: Vec3) Mat3 {
        // Delegate to Mat4.rotate (below) and keep its upper-left 3x3 block.
        const m4 = Mat4.rotate(angle, axis_unorm).m;
        return Mat3 {
            .m = [_][3]f32 {
                .{ m4[0][0], m4[0][1], m4[0][2] },
                .{ m4[1][0], m4[1][1], m4[1][2] },
                .{ m4[2][0], m4[2][1], m4[2][2] },
            },
        };
    }
pub fn translate(mat: *Mat3, translation: Vec2) void {
mat.m[2][0] += translation.x;
mat.m[2][1] += translation.y;
}
pub fn scale(mat: *Mat3, scale: Vec2) void {
mat.m[0][0] *= scale.x;
mat.m[1][1] *= scale.y;
}
pub fn fromTranslation(translation: Vec2) Mat3 {
var res = Mat3.identity();
res.m[2][0] = translation.x;
res.m[2][1] = translation.y;
return res;
}
pub fn fromScale(scale: Vec2) Mat3 {
var res = Mat3.identity();
res.m[0][0] = scale.x;
res.m[1][1] = scale.y;
return res;
}
};
pub const Mat4 = packed struct {
m: [4][4]f32,
pub fn identity() Mat4 {
return Mat4 {
.m = [_][4]f32 {
.{ 1.0, 0.0, 0.0, 0.0 },
.{ 0.0, 1.0, 0.0, 0.0 },
.{ 0.0, 0.0, 1.0, 0.0 },
.{ 0.0, 0.0, 0.0, 1.0 }
},
};
}
pub fn zero() Mat4 {
return Mat4 {
.m = [_][4]f32 {
.{ 0.0, 0.0, 0.0, 0.0 },
.{ 0.0, 0.0, 0.0, 0.0 },
.{ 0.0, 0.0, 0.0, 0.0 },
.{ 0.0, 0.0, 0.0, 0.0 }
},
};
}
pub fn mul(left: Mat4, right: Mat4) Mat4 {
var res = Mat4.zero();
var col: usize = 0;
while (col < 4): (col += 1) {
var row: usize = 0;
while (row < 4): (row += 1) {
res.m[col][row] = left.m[0][row] * right.m[col][0] +
left.m[1][row] * right.m[col][1] +
left.m[2][row] * right.m[col][2] +
left.m[3][row] * right.m[col][3];
}
}
return res;
}
pub fn persp(fov: f32, aspect: f32, near: f32, far: f32) Mat4 {
var res = Mat4.identity();
const t = math.tan(fov * (math.pi / 360.0));
res.m[0][0] = 1.0 / t;
res.m[1][1] = aspect / t;
res.m[2][3] = -1.0;
res.m[2][2] = (near + far) / (near - far);
res.m[3][2] = (2.0 * near * far) / (near - far);
res.m[3][3] = 0.0;
return res;
}
pub fn lookat(eye: Vec3, center: Vec3, up: Vec3) Mat4 {
var res = Mat4.zero();
const f = Vec3.norm(Vec3.sub(center, eye));
const s = Vec3.norm(Vec3.cross(f, up));
const u = Vec3.cross(s, f);
res.m[0][0] = s.x;
res.m[0][1] = u.x;
res.m[0][2] = -f.x;
res.m[1][0] = s.y;
res.m[1][1] = u.y;
res.m[1][2] = -f.y;
res.m[2][0] = s.z;
res.m[2][1] = u.z;
res.m[2][2] = -f.z;
res.m[3][0] = -Vec3.dot(s, eye);
res.m[3][1] = -Vec3.dot(u, eye);
res.m[3][2] = Vec3.dot(f, eye);
res.m[3][3] = 1.0;
return res;
}
pub fn rotate(angle: f32, axis_unorm: Vec3) Mat4 {
var res = Mat4.identity();
const axis = Vec3.norm(axis_unorm);
const sin_theta = math.sin(radians(angle));
const cos_theta = math.cos(radians(angle));
const cos_value = 1.0 - cos_theta;
res.m[0][0] = (axis.x * axis.x * cos_value) + cos_theta;
res.m[0][1] = (axis.x * axis.y * cos_value) + (axis.z * sin_theta);
res.m[0][2] = (axis.x * axis.z * cos_value) - (axis.y * sin_theta);
res.m[1][0] = (axis.y * axis.x * cos_value) - (axis.z * sin_theta);
res.m[1][1] = (axis.y * axis.y * cos_value) + cos_theta;
res.m[1][2] = (axis.y * axis.z * cos_value) + (axis.x * sin_theta);
res.m[2][0] = (axis.z * axis.x * cos_value) + (axis.y * sin_theta);
res.m[2][1] = (axis.z * axis.y * cos_value) - (axis.x * sin_theta);
res.m[2][2] = (axis.z * axis.z * cos_value) + cos_theta;
return res;
}
pub fn translate(translation: Vec3) Mat4 {
var res = Mat4.identity();
res.m[3][0] = translation.x;
res.m[3][1] = translation.y;
res.m[3][2] = translation.z;
return res;
}
};
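// Usage sketch: compose a model-view-projection matrix the way a renderer
// would. Multiplying the result by an identity translation must leave it
// unchanged, which the loop below checks element by element.
test "Mat4 mvp composition" {
    const proj = Mat4.persp(60.0, 4.0 / 3.0, 0.1, 100.0);
    const view = Mat4.lookat(Vec3.new(0.0, 1.5, 6.0), Vec3.zero(), Vec3.up());
    const model = Mat4.translate(Vec3.new(0.0, 0.0, -2.0));
    const mvp = Mat4.mul(Mat4.mul(proj, view), model);
    const same = Mat4.mul(mvp, Mat4.translate(Vec3.zero()));
    for (mvp.m) |row, y| {
        for (row) |val, x| {
            assert(eq(val, same.m[y][x]));
        }
    }
}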
test "Vec3.zero" {
const v = Vec3.zero();
assert(v.x == 0.0 and v.y == 0.0 and v.z == 0.0);
}
test "Vec3.new" {
const v = Vec3.new(1.0, 2.0, 3.0);
assert(v.x == 1.0 and v.y == 2.0 and v.z == 3.0);
}
test "Mat4.ident" {
const m = Mat4.identity();
for (m.m) |row, y| {
for (row) |val, x| {
if (x == y) {
assert(val == 1.0);
}
else {
assert(val == 0.0);
}
}
}
}
test "Mat4.mul"{
const l = Mat4.identity();
const r = Mat4.identity();
const m = Mat4.mul(l, r);
for (m.m) |row, y| {
for (row) |val, x| {
if (x == y) {
assert(val == 1.0);
}
else {
assert(val == 0.0);
}
}
}
}
fn eq(val: f32, cmp: f32) bool {
const delta: f32 = 0.00001;
return (val > (cmp-delta)) and (val < (cmp+delta));
}
test "Mat4.persp" {
const m = Mat4.persp(60.0, 1.33333337, 0.01, 10.0);
assert(eq(m.m[0][0], 1.73205));
assert(eq(m.m[0][1], 0.0));
assert(eq(m.m[0][2], 0.0));
assert(eq(m.m[0][3], 0.0));
assert(eq(m.m[1][0], 0.0));
assert(eq(m.m[1][1], 2.30940));
assert(eq(m.m[1][2], 0.0));
assert(eq(m.m[1][3], 0.0));
assert(eq(m.m[2][0], 0.0));
assert(eq(m.m[2][1], 0.0));
assert(eq(m.m[2][2], -1.00200));
assert(eq(m.m[2][3], -1.0));
assert(eq(m.m[3][0], 0.0));
assert(eq(m.m[3][1], 0.0));
assert(eq(m.m[3][2], -0.02002));
assert(eq(m.m[3][3], 0.0));
}
test "Mat4.lookat" {
const m = Mat4.lookat(.{ .x=0.0, .y=1.5, .z=6.0 }, Vec3.zero(), Vec3.up());
assert(eq(m.m[0][0], 1.0));
assert(eq(m.m[0][1], 0.0));
assert(eq(m.m[0][2], 0.0));
assert(eq(m.m[0][3], 0.0));
assert(eq(m.m[1][0], 0.0));
assert(eq(m.m[1][1], 0.97014));
assert(eq(m.m[1][2], 0.24253));
assert(eq(m.m[1][3], 0.0));
assert(eq(m.m[2][0], 0.0));
assert(eq(m.m[2][1], -0.24253));
assert(eq(m.m[2][2], 0.97014));
assert(eq(m.m[2][3], 0.0));
assert(eq(m.m[3][0], 0.0));
assert(eq(m.m[3][1], 0.0));
assert(eq(m.m[3][2], -6.18465));
assert(eq(m.m[3][3], 1.0));
}
test "Mat4.rotate" {
const m = Mat4.rotate(2.0, .{ .x=0.0, .y=1.0, .z=0.0 });
assert(eq(m.m[0][0], 0.99939));
assert(eq(m.m[0][1], 0.0));
assert(eq(m.m[0][2], -0.03489));
assert(eq(m.m[0][3], 0.0));
assert(eq(m.m[1][0], 0.0));
assert(eq(m.m[1][1], 1.0));
assert(eq(m.m[1][2], 0.0));
assert(eq(m.m[1][3], 0.0));
assert(eq(m.m[2][0], 0.03489));
assert(eq(m.m[2][1], 0.0));
assert(eq(m.m[2][2], 0.99939));
assert(eq(m.m[2][3], 0.0));
assert(eq(m.m[3][0], 0.0));
assert(eq(m.m[3][1], 0.0));
assert(eq(m.m[3][2], 0.0));
assert(eq(m.m[3][3], 1.0));
} | src/math.zig |
const warn = std.debug.warn;
const std = @import("std");
/// The earliest year that can be stored in a Date
pub const minYear: i32 = i32(std.math.minInt(i23));
/// The latest year that can be stored in a Date
pub const maxYear: i32 = i32(std.math.maxInt(i23));
/// The earliest month that can be stored in a Date:
/// 1 => January
pub const minMonth: i32 = 1;
/// The latest month that can be stored in a Date:
/// 12 => December
pub const maxMonth: i32 = 12;
/// The earliest day of the month that can be stored in a Date: 1
pub const minDay: i32 = 1;
/// The latest day of the month that can be stored in a Date: 31
pub const maxDay: i32 = 31;
const nbrOfDaysPer400Years: i32 = 146097;
const nbrOfDaysPer100Years: i32 = 36524;
const nbrOfDaysPer4Years: i32 = 1461;
const nbrOfDaysPerYear: i32 = 365;
// 1970-1-1 was 11017 days before 2000-3-1
const unixEpochBeginsOnDay: i32 = -11017;
const dayOffset = []i32{ 0, 31, 61, 92, 122, 153, 184, 214, 245, 275, 306, 337 };
/// The names of the weekdays, in English.
pub const Weekday = enum {
Monday,
Tuesday,
Wednesday,
Thursday,
Friday,
Saturday,
Sunday,
};
/// The names of the months, in English.
pub const Month = enum {
January,
February,
March,
April,
May,
June,
July,
August,
September,
October,
November,
December,
};
const err = error.RangeError;
const DivPair = struct {
quotient: i32,
modulus: i32,
};
/// Floored Division. Assumes divisor > 0.
fn flooredDivision(dividend: i32, comptime divisor: i32) DivPair {
if (divisor == 0) {
@compileError("division by zero");
}
if (divisor < 0) {
@compileError("floored division implementation does not allow a negative divisor");
}
const m = @rem(if (dividend < 0) -dividend else dividend, divisor);
return DivPair{
.quotient = @divFloor(dividend, divisor),
.modulus = if (m != 0 and dividend < 0) divisor - m else m,
};
}
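// Worked example (sketch): floored division always yields a non-negative
// modulus, e.g. -7 = 4 * (-2) + 1, which the calendar math below relies on.
test "flooredDivision keeps the modulus non-negative" {
    const r = flooredDivision(-7, 4);
    std.debug.assert(r.quotient == -2 and r.modulus == 1);
}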
/// A Gregorian date. The size is guaranteed to be 4-bytes, therefore it is
/// inexpensive to pass by value. Construct a Date in one of the following
/// ways:
/// FromYmd -- accepts a Gregorian date in year/month/day format
/// FromCode -- accepts a date code
/// FromCardinal -- accepts a cardinal date
pub const Date = packed struct {
const Self = @This();
_day: u5,
_month: u4,
_year: i23,
/// Accessor for year
pub fn year(self: Self) i32 {
return @intCast(i32, self._year);
}
/// Accessor for month
pub fn month(self: Self) i32 {
return @intCast(i32, self._month) + 1;
}
/// Accessor for month as a Month (enum)
pub fn monthEnum(self: Self) Month {
return @intToEnum(Month, self._month);
}
/// Accessor for day of the month
pub fn day(self: Self) i32 {
return @intCast(i32, self._day) + 1;
}
/// Compare this with another date (the `rhs` or "right hand side"). The result is
/// less than zero if this date is before the rhs, greater than zero if this date is
/// after the rhs, or zero if the two dates are the same.
pub fn compare(self: Self, rhs: Self) i32 {
return compare2(self, rhs);
}
/// Calculate the date code, an integer representing the number of days since the
/// start of the Unix epoch (1970-1-1). Support for negative results allows us to
/// map Gregorian dates exactly back to 1582-2-24, the date the calendar was
/// instituted by Pope Gregory XIII. Adoption varies by nation, but has been in place worldwide
/// since 1926.
pub fn code(self: Self) i32 {
// We take the approach of starting the year on March 1 so that the leap day falls
// at the end. To do this we pretend January and February are part of the previous
// year.
//
// Our internal representation will choose as its base date any day which is
// at the start of the 400-year Gregorian cycle. We have arbitrarily selected
// 2000-3-1.
const dr = flooredDivision(self.month() - 3, 12);
const dr400 = flooredDivision(self.year() + dr.quotient - 2000, 400);
const dr100 = flooredDivision(dr400.modulus, 100);
const dr4 = flooredDivision(dr100.modulus, 4);
return dr400.quotient * nbrOfDaysPer400Years + dr100.quotient * nbrOfDaysPer100Years + dr4.quotient * nbrOfDaysPer4Years + nbrOfDaysPerYear * dr4.modulus + dayOffset[@intCast(usize, dr.modulus)] + self.day() - unixEpochBeginsOnDay - 1;
}
};
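// Sanity sketch: `code()` counts days relative to the Unix epoch, so
// 1970-1-1 itself must map to zero and the following day to one.
test "Date.code at the Unix epoch" {
    const epoch = try FromYmd(1970, 1, 1);
    std.debug.assert(epoch.code() == 0);
    const next = try FromYmd(1970, 1, 2);
    std.debug.assert(next.code() == 1);
}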
fn isYearInRange(y: i32) bool {
return minYear <= y and y <= maxYear;
}
fn isMonthInRange(m: i32) bool {
return minMonth <= m and m <= maxMonth;
}
fn isDayInRange(d: i32) bool {
return minDay <= d and d <= maxDay;
}
/// Returns an integer representing the day of the week, for the given date code.
/// 0 => Monday, 6 => Sunday
pub fn dayOfWeek(datecode: i32) i32 {
return flooredDivision(datecode + 3, 7).modulus;
}
/// Returns an enumerated value representing the day of the week, for the given
/// date code.
pub fn weekday(datecode: i32) Weekday {
return @intToEnum(Weekday, @truncate(@TagType(Weekday), @intCast(u32, dayOfWeek(datecode))));
}
pub fn findDayOffsetIdx(bdc: i32) usize {
// find the month in the table
var gamma: usize = 0;
inline for (dayOffset) |ofs| {
if (bdc < ofs) {
gamma -= 1;
break;
} else if (bdc == ofs or gamma == 11) {
break;
}
gamma += 1;
}
return gamma;
}
/// Construct a Date using a date code. This constructor requires more computation than
/// the other two, so prefer the others if possible.
pub fn FromCode(datecode: i32) Date {
// dateCode has the number of days relative to 1/1/1970, shift this ahead to 3/1/2000
const dr400 = flooredDivision(datecode + unixEpochBeginsOnDay, nbrOfDaysPer400Years);
var dr100 = flooredDivision(dr400.modulus, nbrOfDaysPer100Years);
// put the leap day at the end of 400-year cycle
if (dr100.quotient == 4) {
dr100.quotient -= 1;
dr100.modulus += nbrOfDaysPer100Years;
}
const dr4 = flooredDivision(dr100.modulus, nbrOfDaysPer4Years);
var dr1 = flooredDivision(dr4.modulus, nbrOfDaysPerYear);
// put the leap day at the end of 4-year cycle
if (dr1.quotient == 4) {
dr1.quotient -= 1;
dr1.modulus += nbrOfDaysPerYear;
}
const gamma = findDayOffsetIdx(dr1.modulus);
if (gamma >= 10) {
dr1.quotient += 1;
}
return Date{
._year = @intCast(i23, dr400.quotient * 400 + dr100.quotient * 100 + dr4.quotient * 4 + dr1.quotient + 2000),
._month = @intCast(u4, (gamma + 2) % 12),
._day = @intCast(u5, dr1.modulus - dayOffset[gamma]),
};
}
/// Compare two dates, the `lhs` ("left hand side") and `rhs` ("right hand side").
/// Returns an integer that is less than zero if `lhs` is before `rhs`,
/// greater than zero if `lhs` is after `rhs`, and zero if they both refer to the
/// same date.
pub fn compare2(lhs: Date, rhs: Date) i32 {
var res: i64 = @intCast(i64, @bitCast(i32, lhs)) - @bitCast(i32, rhs);
if (res < 0) {
return -1;
} else if (res > 0) {
return 1;
} else {
return 0;
}
}
/// Construct a Date from its Gregorian year, month, and day. This will fail
/// if any of the inputs are out of range. Note that the range checking only
/// assures that the values can be stored in the internal data structure
/// without losing information. It does allow setting to values which would
/// not be possible in the Gregorian calendar. For example: FromYmd(2000, 2, 30)
/// is perfectly acceptable, even though February 2000 only had 29 days.
/// However, FromYmd(2000, 1, 32) will be rejected.
pub fn FromYmd(y: i32, m: i32, d: i32) !Date {
if (isYearInRange(y) and isMonthInRange(m) and isDayInRange(d)) {
return Date{
._year = @intCast(i23, y),
._month = @intCast(u4, m - 1),
._day = @intCast(u5, d - 1),
};
} else {
return err;
}
}
/// The earliest date which can be represented.
pub const min = comptime Date{ ._year = minYear, ._month = minMonth - 1, ._day = minDay - 1 };
/// The latest date which can be represented.
pub const max = comptime Date{ ._year = maxYear, ._month = maxMonth - 1, ._day = maxDay - 1 };
/// An enumeration of the cardinal values 1 through 5, to be used as an input
/// to cardinal date methods.
pub const Nth = enum {
First,
Second,
Third,
Fourth,
Last,
};
/// Return the date code where year and month are known, and you want to select a
/// specific occurrence of a given weekday. For example, the Second Tuesday in November 2020.
/// This function may fail, if the year or month inputs are out of range.
pub fn cardinalCode(nth: Nth, wkdy: Weekday, y: i32, m: i32) !i32 {
const d = try FromYmd(y, m, 1);
var dc: i32 = d.code();
const dow1st = dayOfWeek(dc);
var wkdy2: i32 = @enumToInt(wkdy);
if (wkdy2 < dow1st) {
wkdy2 += 7;
}
dc += wkdy2 - dow1st + 7 * @intCast(i32, @enumToInt(nth));
if (nth == Nth.Last) {
// check that the fifth week is actually in the same month
const d2 = FromCode(dc);
if (d2.month() != m) {
dc -= 7;
}
}
return dc;
}
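// Usage sketch: the second Tuesday of November 2020 falls on the 10th.
test "cardinalCode second Tuesday" {
    const dc = try cardinalCode(Nth.Second, Weekday.Tuesday, 2020, 11);
    const d = FromCode(dc);
    std.debug.assert(d.year() == 2020 and d.month() == 11 and d.day() == 10);
}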
/// Construct a Date, when year and month are known, and you want to select a
/// specific occurrence of a given weekday. For example, the Second Tuesday in November 2020.
/// This function may fail, if the year or month inputs are out of range.
pub fn FromCardinal(nth: Nth, wkdy: Weekday, y: i32, m: i32) !Date {
var dc = try cardinalCode(nth, wkdy, y, m);
return FromCode(dc);
} | src/gregorianDate.zig |
const std = @import("std");
const Command = @import("Command.zig");
const flag = @import("flag.zig");
// const Arg = @import("Arg.zig");
const testing = std.testing;
const allocator = std.heap.page_allocator;
fn initAppArgs(alloc: std.mem.Allocator) !Command {
var app = Command.new(alloc, "app");
// app <ARG-ONE>
try app.takesSingleValue("ARG-ONE");
// app <ARG-MANY...>
try app.takesNValues("ARG-MANY", 3);
// app [-b, --bool-flag]
try app.addArg(flag.boolean("bool-flag", 'b'));
try app.addArg(flag.boolean("bool-flag2", 'c'));
// var bool_flag = Arg.new("bool-flag");
// bool_flag.shortName('b');
// bool_flag.setLongNameSameAsName();
// try app.addArg(bool_flag);
    // app [-1, --arg-one-flag <VALUE>]
try app.addArg(flag.argOne("arg-one-flag", '1'));
// var arg_one_flag = Arg.new("arg-one-flag");
// arg_one_flag.shortName('1');
// arg_one_flag.setLongNameSameAsName();
// arg_one_flag.takesValue(true);
// try app.addArg(arg_one_flag);
    // app [-3, --argn-flag <VALUE...>]
try app.addArg(flag.argN("argn-flag", '3', 3));
// var argn_flag = Arg.new("argn-flag");
// argn_flag.shortName('3');
// argn_flag.setLongNameSameAsName();
// argn_flag.maxValues(3);
// argn_flag.valuesDelimiter(",");
// try app.addArg(argn_flag);
// app [-o, --option-flag <opt1 | opt2 | opt3>]
try app.addArg(flag.option("option-flag", 'o', &[_][]const u8{
"opt1",
"opt2",
"opt3",
}));
// var opt_flag = Arg.new("option-flag");
// opt_flag.shortName('o');
// opt_flag.setLongNameSameAsName();
// opt_flag.allowedValues(&[_][]const u8{
// "opt1",
// "opt2",
// "opt3",
// });
// try app.addArg(opt_flag);
// app subcmd1
try app.addSubcommand(Command.new(alloc, "subcmd1"));
return app;
}
test "arg required error" {
const argv: []const [:0]const u8 = &.{
"--mode",
"debug",
};
var app = try initAppArgs(allocator);
app.argRequired(true);
defer app.deinit();
try testing.expectError(error.CommandArgumentNotProvided, app.parseFrom(argv));
}
test "subcommand required error" {
const argv: []const [:0]const u8 = &.{
"",
};
var app = try initAppArgs(allocator);
app.subcommandRequired(true);
defer app.deinit();
try testing.expectError(error.MissingCommandSubCommand, app.parseFrom(argv));
}
test "command that takes value" {
const argv: []const [:0]const u8 = &.{
"argone",
"argmany1",
"argmany2",
"argmany3",
};
var app = try initAppArgs(allocator);
defer app.deinit();
var matches = try app.parseFrom(argv);
try testing.expectEqualStrings("argone", matches.valueOf("ARG-ONE").?);
const many_values = matches.valuesOf("ARG-MANY").?;
try testing.expectEqualStrings("argmany1", many_values[0]);
try testing.expectEqualStrings("argmany2", many_values[1]);
try testing.expectEqualStrings("argmany3", many_values[2]);
}
test "flags" {
const argv: []const [:0]const u8 = &.{
"-bc",
"-1one",
"--argn-flag=val1,val2,val3",
"--option-flag",
"opt2",
};
var app = try initAppArgs(allocator);
defer app.deinit();
var matches = try app.parseFrom(argv);
defer matches.deinit();
try testing.expect(matches.isPresent("bool-flag") == true);
try testing.expect(matches.isPresent("bool-flag2") == true);
try testing.expectEqualStrings("one", matches.valueOf("arg-one-flag").?);
const argn_values = matches.valuesOf("argn-flag").?;
try testing.expectEqualStrings("val1", argn_values[0]);
try testing.expectEqualStrings("val2", argn_values[1]);
try testing.expectEqualStrings("val3", argn_values[2]);
try testing.expectEqualStrings("opt2", matches.valueOf("option-flag").?);
} | src/main.zig |
usingnamespace @import("core.zig");
const glfw = @import("glfw");
const vk = @import("vulkan");
usingnamespace @import("vulkan/instance.zig");
const Device = @import("vulkan/device.zig").Device;
const Swapchain = @import("vulkan/swapchain.zig").Swapchain;
const Mesh = @import("renderer/mesh.zig").Mesh;
const TransferQueue = @import("transfer_queue.zig").TransferQueue;
const MeshManager = @import("renderer/mesh_manager.zig").MeshManager;
const imgui = @import("Imgui.zig");
const resources = @import("resources");
const Input = @import("input.zig").Input;
const GPU_TIMEOUT: u64 = std.math.maxInt(u64);
const ColorVertex = struct {
const Self = @This();
const binding_description = vk.VertexInputBindingDescription{
.binding = 0,
.stride = @sizeOf(Self),
.input_rate = .vertex,
};
const attribute_description = [_]vk.VertexInputAttributeDescription{
.{
.binding = 0,
.location = 0,
.format = .r32g32b32_sfloat,
.offset = @byteOffsetOf(Self, "pos"),
},
.{
.binding = 0,
.location = 1,
.format = .r32g32b32_sfloat,
.offset = @byteOffsetOf(Self, "color"),
},
};
pos: Vector3,
color: Vector3,
};
pub const tri_vertices = [_]ColorVertex{
.{ .pos = Vector3.new(0, -0.75, 0.0), .color = Vector3.new(1, 0, 0) },
.{ .pos = Vector3.new(-0.75, 0.75, 0.0), .color = Vector3.new(0, 1, 0) },
.{ .pos = Vector3.new(0.75, 0.75, 0.0), .color = Vector3.new(0, 0, 1) },
};
pub const Renderer = struct {
const Self = @This();
allocator: *Allocator,
instance: Instance,
device: Device,
surface: vk.SurfaceKHR,
swapchain: Swapchain,
swapchain_index: u32,
graphics_queue: vk.Queue,
graphics_command_pool: vk.CommandPool,
//TODO: multiple frames in flight
device_frame: DeviceFrame,
transfer_queue: TransferQueue,
images_descriptor_layout: vk.DescriptorSetLayout,
images_descriptor_pool: vk.DescriptorPool,
images_descriptor_set: vk.DescriptorSet,
imgui_layer: imgui.Layer,
meshes: MeshManager,
tri_pipeline_layout: vk.PipelineLayout,
tri_pipeline: vk.Pipeline,
tri_mesh: Mesh,
pub fn init(allocator: *Allocator, window: glfw.Window) !Self {
const vulkan_support = try glfw.vulkanSupported();
if (!vulkan_support) {
return error.VulkanNotSupported;
}
var instance = try Instance.init(allocator, "Saturn Editor", AppVersion(0, 0, 0, 0));
var selected_device = instance.pdevices[0];
var selected_queue_index: u32 = 0;
var device = try Device.init(allocator, instance.dispatch, selected_device, selected_queue_index);
var surface = try createSurface(instance.handle, window);
var supports_surface = try instance.dispatch.getPhysicalDeviceSurfaceSupportKHR(selected_device, selected_queue_index, surface);
if (supports_surface == 0) {
return error.NoDeviceSurfaceSupport;
}
var swapchain = try Swapchain.init(allocator, instance.dispatch, device, selected_device, surface);
var graphics_queue = device.dispatch.getDeviceQueue(device.handle, selected_queue_index, 0);
var graphics_command_pool = try device.dispatch.createCommandPool(
device.handle,
.{
.flags = .{ .reset_command_buffer_bit = true },
.queue_family_index = selected_queue_index,
},
null,
);
const sampled_image_count: u32 = 1;
const bindings = [_]vk.DescriptorSetLayoutBinding{.{
.binding = 0,
.descriptor_type = .combined_image_sampler,
.descriptor_count = sampled_image_count,
.stage_flags = .{ .fragment_bit = true },
.p_immutable_samplers = null,
}};
const pool_sizes = [_]vk.DescriptorPoolSize{.{
.type_ = .combined_image_sampler,
.descriptor_count = sampled_image_count,
}};
var images_descriptor_layout = try device.dispatch.createDescriptorSetLayout(
device.handle,
.{
.flags = .{ .update_after_bind_pool_bit = true },
.binding_count = bindings.len,
.p_bindings = &bindings,
},
null,
);
var images_descriptor_pool = try device.dispatch.createDescriptorPool(device.handle, .{
.flags = .{ .update_after_bind_bit = true },
.max_sets = 1,
.pool_size_count = pool_sizes.len,
.p_pool_sizes = &pool_sizes,
}, null);
var images_descriptor_set: vk.DescriptorSet = .null_handle;
_ = try device.dispatch.allocateDescriptorSets(
device.handle,
.{
.descriptor_pool = images_descriptor_pool,
.descriptor_set_count = 1,
.p_set_layouts = @ptrCast([*]const vk.DescriptorSetLayout, &images_descriptor_layout),
},
@ptrCast([*]vk.DescriptorSet, &images_descriptor_set),
);
var device_frame = try DeviceFrame.init(device, graphics_command_pool);
var transfer_queue = TransferQueue.init(allocator, device);
var command_buffer = try beginSingleUseCommandBuffer(device, graphics_command_pool);
try endSingleUseCommandBuffer(device, graphics_queue, graphics_command_pool, command_buffer);
var descriptor_set_layouts = [_]vk.DescriptorSetLayout{images_descriptor_layout};
var imgui_layer = try imgui.Layer.init(allocator, device, &transfer_queue, swapchain.render_pass, &descriptor_set_layouts);
var image_write = vk.DescriptorImageInfo{
.sampler = imgui_layer.texture_sampler,
.image_view = imgui_layer.texture_atlas.image_view,
.image_layout = .shader_read_only_optimal,
};
var write_descriptor_set = vk.WriteDescriptorSet{
.dst_set = images_descriptor_set,
.dst_binding = 0,
.dst_array_element = 0,
.descriptor_count = 1,
.descriptor_type = .combined_image_sampler,
.p_image_info = @ptrCast([*]vk.DescriptorImageInfo, &image_write),
.p_buffer_info = undefined,
.p_texel_buffer_view = undefined,
};
device.dispatch.updateDescriptorSets(
device.handle,
1,
@ptrCast([*]vk.WriteDescriptorSet, &write_descriptor_set),
0,
undefined,
);
var push_constant_range = vk.PushConstantRange{
.stage_flags = .{ .vertex_bit = true },
.offset = 0,
.size = 64,
};
var tri_pipeline_layout = try device.dispatch.createPipelineLayout(device.handle, .{
.flags = .{},
.set_layout_count = 0,
.p_set_layouts = undefined,
.push_constant_range_count = 1,
.p_push_constant_ranges = @ptrCast([*]const vk.PushConstantRange, &push_constant_range),
}, null);
var tri_pipeline = try device.createPipeline(
tri_pipeline_layout,
swapchain.render_pass,
&resources.tri_vert,
&resources.tri_frag,
&ColorVertex.binding_description,
&ColorVertex.attribute_description,
&.{
.cull_mode = .{},
.blend_enable = false,
.src_color_blend_factor = .src_alpha,
.dst_color_blend_factor = .one_minus_src_alpha,
.color_blend_op = .add,
.src_alpha_blend_factor = .src_alpha,
.dst_alpha_blend_factor = .one_minus_src_alpha,
.alpha_blend_op = .add,
},
);
var tri_mesh = try Mesh.init(ColorVertex, u32, device, 3, 3);
transfer_queue.copyToBuffer(tri_mesh.vertex_buffer, ColorVertex, &tri_vertices);
transfer_queue.copyToBuffer(tri_mesh.index_buffer, u32, &[_]u32{ 0, 1, 2 });
var meshes = MeshManager.init(allocator, device);
//TODO: temp call
var mesh_id = meshes.load("assets/sphere.obj");
return Self{
.allocator = allocator,
.instance = instance,
.device = device,
.surface = surface,
.swapchain = swapchain,
.swapchain_index = 0,
.graphics_queue = graphics_queue,
.graphics_command_pool = graphics_command_pool,
.device_frame = device_frame,
.transfer_queue = transfer_queue,
.images_descriptor_layout = images_descriptor_layout,
.images_descriptor_pool = images_descriptor_pool,
.images_descriptor_set = images_descriptor_set,
.imgui_layer = imgui_layer,
.meshes = meshes,
.tri_pipeline_layout = tri_pipeline_layout,
.tri_pipeline = tri_pipeline,
.tri_mesh = tri_mesh,
};
}
pub fn deinit(self: *Self) void {
self.device.waitIdle();
//TODO: temp
self.device.dispatch.destroyPipeline(self.device.handle, self.tri_pipeline, null);
self.device.dispatch.destroyPipelineLayout(self.device.handle, self.tri_pipeline_layout, null);
self.tri_mesh.deinit();
self.device.dispatch.destroyDescriptorPool(self.device.handle, self.images_descriptor_pool, null);
self.device.dispatch.destroyDescriptorSetLayout(self.device.handle, self.images_descriptor_layout, null);
self.meshes.deinit();
self.imgui_layer.deinit();
self.transfer_queue.deinit();
self.device_frame.deinit();
self.swapchain.deinit();
self.device.dispatch.destroyCommandPool(self.device.handle, self.graphics_command_pool, null);
self.device.deinit();
self.instance.dispatch.destroySurfaceKHR(self.instance.handle, self.surface, null);
self.instance.deinit();
}
pub fn update(self: Self, window: glfw.Window, input: *Input, delta_time: f32) void {
self.imgui_layer.update(window, input, delta_time);
}
pub fn render(self: *Self) !void {
var begin_result = try self.beginFrame();
if (begin_result) |command_buffer| {
self.device.dispatch.cmdBindPipeline(command_buffer, .graphics, self.tri_pipeline);
var size = self.swapchain.extent;
var model = Matrix4.model(Vector3.new(0, 0, 5), Quaternion.identity, Vector3.one);
var view = Matrix4.view_lh(Vector3.new(0, 0, -5), Quaternion.identity);
var perspective = Matrix4.perspective_lh_zo(3.1415926 / 4.0, @intToFloat(f32, size.width) / @intToFloat(f32, size.height), 0.1, 100);
var mvp = perspective.mul(view).mul(model);
self.device.dispatch.cmdPushConstants(command_buffer, self.tri_pipeline_layout, .{ .vertex_bit = true }, 0, @sizeOf(Matrix4), &mvp.data);
if (self.meshes.get(0)) |mesh| {
self.device.dispatch.cmdBindVertexBuffers(command_buffer, 0, 1, &[_]vk.Buffer{mesh.vertex_buffer.handle}, &[_]u64{0});
self.device.dispatch.cmdBindIndexBuffer(command_buffer, mesh.index_buffer.handle, 0, vk.IndexType.uint32);
self.device.dispatch.cmdDrawIndexed(command_buffer, mesh.index_count, 1, 0, 0, 0);
} else {
self.device.dispatch.cmdBindVertexBuffers(command_buffer, 0, 1, &[_]vk.Buffer{self.tri_mesh.vertex_buffer.handle}, &[_]u64{0});
self.device.dispatch.cmdBindIndexBuffer(command_buffer, self.tri_mesh.index_buffer.handle, 0, vk.IndexType.uint32);
self.device.dispatch.cmdDrawIndexed(command_buffer, self.tri_mesh.index_count, 1, 0, 0, 0);
}
self.imgui_layer.beginFrame();
try self.imgui_layer.endFrame(command_buffer, &[_]vk.DescriptorSet{self.images_descriptor_set});
try self.endFrame();
}
}
fn beginFrame(self: *Self) !?vk.CommandBuffer {
var current_frame = &self.device_frame;
        var fence = @ptrCast([*]const vk.Fence, &current_frame.frame_done_fence);
_ = try self.device.dispatch.waitForFences(self.device.handle, 1, fence, 1, GPU_TIMEOUT);
if (self.swapchain.getNextImage(current_frame.image_ready_semaphore)) |index| {
self.swapchain_index = index;
} else {
            // Swapchain is invalid, don't render this frame
return null;
}
_ = try self.device.dispatch.resetFences(self.device.handle, 1, fence);
self.transfer_queue.clearResources();
self.meshes.flush();
try self.device.dispatch.beginCommandBuffer(current_frame.command_buffer, .{
.flags = .{},
.p_inheritance_info = null,
});
self.transfer_queue.commitTransfers(current_frame.command_buffer);
self.meshes.transfers.commitTransfers(current_frame.command_buffer);
const extent = self.swapchain.extent;
const viewports = [_]vk.Viewport{.{
.x = 0,
.y = 0,
.width = @intToFloat(f32, extent.width),
.height = @intToFloat(f32, extent.height),
.min_depth = 0,
.max_depth = 1,
}};
const scissors = [_]vk.Rect2D{.{
.offset = .{ .x = 0, .y = 0 },
.extent = extent,
}};
self.device.dispatch.cmdSetViewport(current_frame.command_buffer, 0, 1, &viewports);
self.device.dispatch.cmdSetScissor(current_frame.command_buffer, 0, 1, &scissors);
const clears_values = [_]vk.ClearValue{.{
.color = .{ .float_32 = .{ 0, 0, 0, 1 } },
}};
self.device.dispatch.cmdBeginRenderPass(
current_frame.command_buffer,
.{
.render_pass = self.swapchain.render_pass,
.framebuffer = self.swapchain.framebuffers.items[self.swapchain_index],
.render_area = .{
.offset = .{ .x = 0, .y = 0 },
.extent = extent,
},
.clear_value_count = 1,
.p_clear_values = &clears_values,
},
.@"inline",
);
return current_frame.command_buffer;
}
fn endFrame(self: *Self) !void {
var current_frame = &self.device_frame;
self.device.dispatch.cmdEndRenderPass(current_frame.command_buffer);
try self.device.dispatch.endCommandBuffer(current_frame.command_buffer);
var wait_stages = vk.PipelineStageFlags{
.color_attachment_output_bit = true,
};
const submit_infos = [_]vk.SubmitInfo{.{
.wait_semaphore_count = 1,
.p_wait_semaphores = &[_]vk.Semaphore{current_frame.image_ready_semaphore},
.p_wait_dst_stage_mask = &[_]vk.PipelineStageFlags{wait_stages},
.command_buffer_count = 1,
.p_command_buffers = &[_]vk.CommandBuffer{current_frame.command_buffer},
.signal_semaphore_count = 1,
.p_signal_semaphores = &[_]vk.Semaphore{current_frame.present_semaphore},
}};
try self.device.dispatch.queueSubmit(self.graphics_queue, 1, &submit_infos, current_frame.frame_done_fence);
_ = self.device.dispatch.queuePresentKHR(self.graphics_queue, .{
.wait_semaphore_count = 1,
.p_wait_semaphores = &[_]vk.Semaphore{current_frame.present_semaphore},
.swapchain_count = 1,
.p_swapchains = &[_]vk.SwapchainKHR{self.swapchain.handle},
.p_image_indices = &[_]u32{self.swapchain_index},
.p_results = null,
}) catch |err| {
switch (err) {
error.OutOfDateKHR => {
self.swapchain.invalid = true;
},
else => return err,
}
};
}
};
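// Usage sketch (assumptions: a glfw.Window, an Input, and a frame timer are
// set up elsewhere in the editor's main loop; `window.shouldClose()` is the
// wrapper's close query):
//
//     var renderer = try Renderer.init(allocator, window);
//     defer renderer.deinit();
//     while (!window.shouldClose()) {
//         renderer.update(window, &input, delta_time);
//         try renderer.render();
//     }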
fn beginSingleUseCommandBuffer(device: Device, command_pool: vk.CommandPool) !vk.CommandBuffer {
var command_buffer: vk.CommandBuffer = undefined;
try device.dispatch.allocateCommandBuffers(device.handle, .{
.command_pool = command_pool,
.level = .primary,
.command_buffer_count = 1,
}, @ptrCast([*]vk.CommandBuffer, &command_buffer));
try device.dispatch.beginCommandBuffer(command_buffer, .{
.flags = .{},
.p_inheritance_info = null,
});
return command_buffer;
}
fn endSingleUseCommandBuffer(device: Device, queue: vk.Queue, command_pool: vk.CommandPool, command_buffer: vk.CommandBuffer) !void {
try device.dispatch.endCommandBuffer(command_buffer);
const submitInfo = vk.SubmitInfo{
.wait_semaphore_count = 0,
.p_wait_semaphores = undefined,
.p_wait_dst_stage_mask = undefined,
.command_buffer_count = 1,
.p_command_buffers = @ptrCast([*]const vk.CommandBuffer, &command_buffer),
.signal_semaphore_count = 0,
.p_signal_semaphores = undefined,
};
try device.dispatch.queueSubmit(queue, 1, @ptrCast([*]const vk.SubmitInfo, &submitInfo), vk.Fence.null_handle);
try device.dispatch.queueWaitIdle(queue);
device.dispatch.freeCommandBuffers(
device.handle,
command_pool,
1,
@ptrCast([*]const vk.CommandBuffer, &command_buffer),
);
}
fn createSurface(instance: vk.Instance, window: glfw.Window) !vk.SurfaceKHR {
var surface: vk.SurfaceKHR = undefined;
if ((try glfw.createWindowSurface(instance, window, null, &surface)) != @enumToInt(vk.Result.success)) {
return error.SurfaceCreationFailed;
}
return surface;
}
const DeviceFrame = struct {
const Self = @This();
device: Device,
frame_done_fence: vk.Fence,
image_ready_semaphore: vk.Semaphore,
present_semaphore: vk.Semaphore,
command_buffer: vk.CommandBuffer,
fn init(
device: Device,
pool: vk.CommandPool,
) !Self {
var frame_done_fence = try device.dispatch.createFence(device.handle, .{
.flags = .{ .signaled_bit = true },
}, null);
var image_ready_semaphore = try device.dispatch.createSemaphore(device.handle, .{
.flags = .{},
}, null);
var present_semaphore = try device.dispatch.createSemaphore(device.handle, .{
.flags = .{},
}, null);
var command_buffer: vk.CommandBuffer = undefined;
try device.dispatch.allocateCommandBuffers(device.handle, .{
.command_pool = pool,
.level = .primary,
.command_buffer_count = 1,
}, @ptrCast([*]vk.CommandBuffer, &command_buffer));
return Self{
.device = device,
.frame_done_fence = frame_done_fence,
.image_ready_semaphore = image_ready_semaphore,
.present_semaphore = present_semaphore,
.command_buffer = command_buffer,
};
}
fn deinit(self: Self) void {
self.device.dispatch.destroyFence(self.device.handle, self.frame_done_fence, null);
self.device.dispatch.destroySemaphore(self.device.handle, self.image_ready_semaphore, null);
self.device.dispatch.destroySemaphore(self.device.handle, self.present_semaphore, null);
}
}; | src/renderer.zig |
const std = @import("std");
const hash = std.crypto.hash;
const mem = std.mem;
const rand = std.rand;
const testing = std.testing;
const time = std.time;
const log = std.log.scoped(.uuid);
const Uuid = @This();
bytes: [16]u8,
pub const nil = fromInt(0);
/// Creates a new UUID from a 16-byte slice. Only validates the slice length.
pub fn fromSlice(bytes: []const u8) error{InvalidSize}!Uuid {
if (bytes.len < 16) return error.InvalidSize;
var uuid: Uuid = undefined;
    // Only the first 16 bytes are used; copying the whole slice would assert
    // for inputs longer than 16 bytes.
    std.mem.copy(u8, &uuid.bytes, bytes[0..16]);
return uuid;
}
/// Creates a new UUID from a u128. Performs no validation.
pub fn fromInt(value: u128) Uuid {
var uuid: Uuid = undefined;
std.mem.writeIntBig(u128, &uuid.bytes, value);
return uuid;
}
fn formatHex(dst: []u8, src: []const u8) error{InvalidSize}!void {
if (dst.len < 2 * src.len) return error.InvalidSize;
const alphabet = "0123456789abcdef";
var d: usize = 0;
var s: usize = 0;
while (d < dst.len and s < src.len) : ({
d += 2;
s += 1;
}) {
const byte = src[s];
dst[d] = alphabet[byte >> 4];
dst[d + 1] = alphabet[byte & 0xf];
}
}
/// Formats the UUID to the buffer according to RFC-4122.
pub fn formatBuf(self: Uuid, buf: []u8) error{InvalidSize}!void {
if (buf.len < 36) return error.InvalidSize;
formatHex(buf[0..8], self.bytes[0..4]) catch unreachable;
buf[8] = '-';
formatHex(buf[9..13], self.bytes[4..6]) catch unreachable;
buf[13] = '-';
formatHex(buf[14..18], self.bytes[6..8]) catch unreachable;
buf[18] = '-';
formatHex(buf[19..23], self.bytes[8..10]) catch unreachable;
buf[23] = '-';
formatHex(buf[24..], self.bytes[10..]) catch unreachable;
}
/// Formats the UUID according to RFC-4122.
pub fn format(self: Uuid, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
var buf: [36]u8 = undefined;
self.formatBuf(&buf) catch unreachable;
try writer.writeAll(&buf);
}
test "format" {
var buf: [36]u8 = undefined;
_ = try std.fmt.bufPrint(&buf, "{}", .{nil});
try testing.expectEqualStrings("00000000-0000-0000-0000-000000000000", &buf);
_ = try std.fmt.bufPrint(&buf, "{}", .{fromInt(0x0123456789abcdef0123456789abcdef)});
try testing.expectEqualStrings("01234567-89ab-cdef-0123-456789abcdef", &buf);
}
pub const ParseError = error{
InvalidSize,
InvalidCharacter,
};
fn parseHex(dst: []u8, src: []const u8) ParseError!void {
if (src.len & 1 == 1 or dst.len < src.len / 2) return error.InvalidSize;
var d: usize = 0;
var s: usize = 0;
while (d < dst.len and s < src.len) : ({
d += 1;
s += 2;
}) {
dst[d] = switch (src[s]) {
'0'...'9' => |c| c - '0',
'A'...'F' => |c| c - 'A' + 10,
'a'...'f' => |c| c - 'a' + 10,
else => return error.InvalidCharacter,
} << 4 | switch (src[s + 1]) {
'0'...'9' => |c| c - '0',
'A'...'F' => |c| c - 'A' + 10,
'a'...'f' => |c| c - 'a' + 10,
else => return error.InvalidCharacter,
};
}
}
/// Parses an RFC-4122-format string; the separator positions are skipped, not validated.
pub fn parse(str: []const u8) ParseError!Uuid {
if (str.len < 36) return error.InvalidSize;
var uuid: Uuid = undefined;
try parseHex(uuid.bytes[0..4], str[0..8]);
try parseHex(uuid.bytes[4..6], str[9..13]);
try parseHex(uuid.bytes[6..8], str[14..18]);
try parseHex(uuid.bytes[8..10], str[19..23]);
try parseHex(uuid.bytes[10..], str[24..]);
return uuid;
}
test "parse" {
const uuid = try parse("01234567-89ab-cdef-0123-456789abcdef");
try testing.expectEqual(fromInt(0x0123456789abcdef0123456789abcdef).bytes, uuid.bytes);
}
pub fn setVersion(uuid: *Uuid, version: u4) void {
uuid.bytes[6] = (@as(u8, version) << 4) | (uuid.bytes[6] & 0xf);
}
/// Returns the UUID version number.
pub fn getVersion(self: Uuid) u4 {
return @truncate(u4, self.bytes[6] >> 4);
}
pub const Variant = enum {
reserved_ncs,
rfc4122,
reserved_microsoft,
reserved_future,
};
pub fn setVariant(uuid: *Uuid, variant: Uuid.Variant) void {
uuid.bytes[8] = switch (variant) {
.reserved_ncs => uuid.bytes[8] & 0b01111111,
.rfc4122 => 0b10000000 | (uuid.bytes[8] & 0b00111111),
.reserved_microsoft => 0b11000000 | (uuid.bytes[8] & 0b00011111),
        .reserved_future => 0b11100000 | (uuid.bytes[8] & 0b00011111),
};
}
/// Returns the UUID variant. All UUIDs created by this library are RFC-4122 variants.
pub fn getVariant(self: Uuid) Variant {
const byte = self.bytes[8];
if (byte >> 7 == 0b0) {
return .reserved_ncs;
} else if (byte >> 6 == 0b10) {
return .rfc4122;
} else if (byte >> 5 == 0b110) {
return .reserved_microsoft;
} else {
return .reserved_future;
}
}
test "variant and version" {
var uuid = try parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8");
try testing.expectEqual(Variant.rfc4122, uuid.getVariant());
try testing.expectEqual(@as(u4, 1), uuid.getVersion());
uuid = try parse("3d813cbb-47fb-32ba-91df-831e1593ac29");
try testing.expectEqual(Variant.rfc4122, uuid.getVariant());
try testing.expectEqual(@as(u4, 3), uuid.getVersion());
uuid = nil;
uuid.setVariant(.rfc4122);
uuid.setVersion(4);
try testing.expectEqual(Variant.rfc4122, uuid.getVariant());
try testing.expectEqual(@as(u4, 4), uuid.getVersion());
}
pub const namespace = struct {
pub const dns = fromInt(0x6ba7b8109dad11d180b400c04fd430c8);
pub const url = fromInt(0x6ba7b8119dad11d180b400c04fd430c8);
pub const iso_oid = fromInt(0x6ba7b8129dad11d180b400c04fd430c8);
pub const x500_dn = fromInt(0x6ba7b8149dad11d180b400c04fd430c8);
};
/// A UUIDv3 is created by combining a namespace UUID and name via MD5.
pub const v3 = struct {
pub const Source = struct {
md5: hash.Md5,
pub fn init(ns: Uuid) Source {
var md5 = hash.Md5.init(.{});
md5.update(&ns.bytes);
return .{ .md5 = md5 };
}
pub fn create(self: Source, name: []const u8) Uuid {
var uuid: Uuid = undefined;
// 128 bits of MD5
var md5 = self.md5;
md5.update(name);
md5.final(&uuid.bytes);
uuid.setVariant(.rfc4122);
uuid.setVersion(3);
return uuid;
}
};
test "Source" {
const source = Source.init(Uuid.namespace.dns);
const uuid1 = source.create("www.example.com");
try testing.expectEqual(Uuid.fromInt(0x5df418813aed351588a72f4a814cf09e), uuid1);
const uuid2 = source.create("www.example.com");
try testing.expectEqual(uuid1, uuid2);
}
pub fn create(ns: Uuid, name: []const u8) Uuid {
return Source.init(ns).create(name);
}
test "create" {
const uuid1 = create(Uuid.namespace.dns, "www.example.com");
try testing.expectEqual(Uuid.fromInt(0x5df418813aed351588a72f4a814cf09e), uuid1);
const uuid2 = create(Uuid.namespace.dns, "www.example.com");
try testing.expectEqual(uuid1, uuid2);
}
};
/// A UUIDv5 is created by combining a namespace UUID and name via SHA-1.
pub const v5 = struct {
pub const Source = struct {
sha1: hash.Sha1,
pub fn init(ns: Uuid) Source {
var sha1 = hash.Sha1.init(.{});
sha1.update(&ns.bytes);
return .{ .sha1 = sha1 };
}
pub fn create(self: Source, name: []const u8) Uuid {
var uuid: Uuid = undefined;
// 128 out of 160 bits of SHA-1
var sha1 = self.sha1;
sha1.update(name);
var buf: [20]u8 = undefined;
sha1.final(&buf);
std.mem.copy(u8, &uuid.bytes, buf[0..16]);
uuid.setVariant(.rfc4122);
uuid.setVersion(5);
return uuid;
}
};
test "Source" {
const source = Source.init(Uuid.namespace.dns);
const uuid1 = source.create("www.example.com");
try testing.expectEqual(Uuid.fromInt(0x2ed6657de927568b95e12665a8aea6a2), uuid1);
const uuid2 = source.create("www.example.com");
try testing.expectEqual(uuid1, uuid2);
}
pub fn create(ns: Uuid, name: []const u8) Uuid {
return Source.init(ns).create(name);
}
test "create" {
const uuid1 = create(Uuid.namespace.dns, "www.example.com");
try testing.expectEqual(Uuid.fromInt(0x2ed6657de927568b95e12665a8aea6a2), uuid1);
const uuid2 = create(Uuid.namespace.dns, "www.example.com");
try testing.expectEqual(uuid1, uuid2);
}
};
/// A UUIDv4 is created from an entropy source.
pub const v4 = struct {
pub fn create(random: rand.Random) Uuid {
var uuid: Uuid = undefined;
// 128 bits of entropy
random.bytes(&uuid.bytes);
uuid.setVariant(.rfc4122);
uuid.setVersion(4);
return uuid;
}
test "create" {
var rng = rand.DefaultPrng.init(0);
const uuid1 = create(rng.random());
const uuid2 = create(rng.random());
try testing.expect(!mem.eql(u8, &uuid1.bytes, &uuid2.bytes));
}
pub const Source = struct {
random: rand.Random,
pub fn init(random: rand.Random) Source {
return .{ .random = random };
}
pub fn create(self: Source) Uuid {
return v4.create(self.random);
}
};
test "Source" {
var rng = rand.DefaultPrng.init(0);
var source = Source.init(rng.random());
const uuid1 = source.create();
const uuid2 = source.create();
try testing.expect(!mem.eql(u8, &uuid1.bytes, &uuid2.bytes));
}
};
/// Used for UUIDv1 & v6.
/// A 14-bit clock sequence that increments monotonically within each 100-ns timestamp interval, and is randomized between intervals.
/// It is thread-safe; a single instance is intended to be shared by the whole application to prevent duplicate clock sequences.
pub const Clock = struct {
mutex: std.Thread.Mutex = .{},
timestamp: u60 = 0,
sequence: u14 = 0,
random: rand.Random,
pub fn init(random: rand.Random) Clock {
return .{ .random = random };
}
fn next(self: *Clock, timestamp: u60) u14 {
self.mutex.lock();
defer self.mutex.unlock();
if (timestamp > self.timestamp) {
self.sequence = self.random.int(u14);
self.timestamp = timestamp;
}
const sequence = self.sequence;
self.sequence +%= 1;
return sequence;
}
};
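// Behavior sketch: two calls within the same 100-ns tick return consecutive
// sequence values, so UUIDs generated at the same instant cannot collide.
test "Clock increments within a tick" {
    var rng = rand.DefaultPrng.init(0);
    var clock = Clock.init(rng.random());
    const first = clock.next(42);
    const second = clock.next(42);
    try testing.expect(second == first +% 1);
}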
/// A UUIDv1 is created from a timestamp and node ID. The node ID is traditionally a MAC address but may be randomized.
pub const v1 = struct {
/// Generates a random node ID suitable for a UUIDv1. This is basically a random MAC address with the multicast bit set.
pub fn randomNode(random: rand.Random) [6]u8 {
var buf: [6]u8 = undefined;
random.bytes(&buf);
buf[0] |= 1;
return buf;
}
/// Number of 100-ns intervals from Gregorian epoch (1582-10-15T00:00:00Z) to Unix epoch (1970-01-01T00:00:00Z)
const epoch_intervals = 12219292800 * (time.ns_per_s / 100);
/// Converts a nanosecond timestamp to a UUID timestamp.
pub fn nanosToTimestamp(nanos: i128) u60 {
const intervals = @divTrunc(nanos, 100);
const from_epoch = intervals + epoch_intervals;
return @truncate(u60, @bitCast(u128, from_epoch));
}
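    // Worked check (sketch): nanosecond 0 is the Unix epoch, which must land
    // exactly on the Gregorian-to-Unix offset measured in 100-ns intervals.
    test "nanosToTimestamp at the Unix epoch" {
        try testing.expectEqual(@as(u60, epoch_intervals), nanosToTimestamp(0));
    }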
fn setTimestamp(uuid: *Uuid, timestamp: u60) void {
// time-low
mem.writeIntBig(u32, @ptrCast(*[4]u8, &uuid.bytes[0]), @truncate(u32, timestamp));
// time-mid
mem.writeIntBig(u16, @ptrCast(*[2]u8, &uuid.bytes[4]), @truncate(u16, timestamp >> 32));
// time-high
mem.writeIntBig(u16, @ptrCast(*[2]u8, &uuid.bytes[6]), @truncate(u16, timestamp >> 48));
}
pub fn getTimestamp(uuid: Uuid) u60 {
const lo = mem.readIntBig(u32, @ptrCast(*const [4]u8, &uuid.bytes[0]));
const md = mem.readIntBig(u16, @ptrCast(*const [2]u8, &uuid.bytes[4]));
const hi = mem.readIntBig(u16, @ptrCast(*const [2]u8, &uuid.bytes[6])) & 0xfff;
return @as(u60, hi) << 48 | @as(u60, md) << 32 | @as(u60, lo);
}
pub fn create(timestamp: u60, clock: *Clock, node: [6]u8) Uuid {
var uuid: Uuid = undefined;
const sequence = clock.next(timestamp);
// 60 bits of timestamp
setTimestamp(&uuid, timestamp);
// 14 bits of clock sequence
mem.writeIntBig(u16, @ptrCast(*[2]u8, &uuid.bytes[8]), sequence);
// 48 bits of node ID
mem.copy(u8, uuid.bytes[10..], &node);
uuid.setVariant(.rfc4122);
uuid.setVersion(1);
return uuid;
}
test "create" {
var rng = rand.DefaultPrng.init(0);
var clock = Clock.init(rng.random());
const node = randomNode(rng.random());
const uuid1 = create(nanosToTimestamp(time.nanoTimestamp()), &clock, node);
const uuid2 = create(nanosToTimestamp(time.nanoTimestamp()), &clock, node);
log.debug("{}\n{}\n", .{ uuid1, uuid2 });
try testing.expect(!mem.eql(u8, &uuid1.bytes, &uuid2.bytes));
}
pub const Source = struct {
clock: *Clock,
node: [6]u8,
pub fn init(clock: *Clock, node: [6]u8) Source {
return .{
.clock = clock,
.node = node,
};
}
pub fn create(self: Source) Uuid {
const nanos = time.nanoTimestamp();
const timestamp = nanosToTimestamp(nanos);
return v1.create(timestamp, self.clock, self.node);
}
};
test "Source" {
var rng = rand.DefaultPrng.init(0);
var clock = Clock.init(rng.random());
const node = randomNode(rng.random());
const source = Source.init(&clock, node);
const uuid1 = source.create();
const uuid2 = source.create();
log.debug("{}\n{}\n", .{ uuid1, uuid2 });
try testing.expect(!mem.eql(u8, &uuid1.bytes, &uuid2.bytes));
}
pub fn fromV6(uuidV6: Uuid) Uuid {
var uuidV1 = uuidV6;
setTimestamp(&uuidV1, v6.getTimestamp(uuidV6));
uuidV1.setVersion(1);
return uuidV1;
}
test "fromV6" {
var rng = rand.DefaultPrng.init(0);
var clock = Clock.init(rng.random());
const source = v6.Source.init(&clock, rng.random());
const uuidV6 = source.create();
const uuidV1 = fromV6(uuidV6);
try testing.expectEqual(uuidV6.getVariant(), uuidV1.getVariant());
try testing.expectEqual(@as(u4, 1), uuidV1.getVersion());
try testing.expectEqualSlices(u8, uuidV6.bytes[10..], uuidV1.bytes[10..]);
}
};
/// A UUIDv6 is created from a timestamp and entropy source. It sorts lexicographically by timestamp.
pub const v6 = struct {
pub const nanosToTimestamp = v1.nanosToTimestamp;
fn setTimestamp(uuid: *Uuid, timestamp: u60) void {
// time-high
mem.writeIntBig(u48, @ptrCast(*[6]u8, &uuid.bytes[0]), @truncate(u48, timestamp >> 12));
// time-low
mem.writeIntBig(u16, @ptrCast(*[2]u8, &uuid.bytes[6]), @truncate(u16, timestamp & 0xfff));
}
pub fn getTimestamp(uuid: Uuid) u60 {
const hi = mem.readIntBig(u48, @ptrCast(*const [6]u8, &uuid.bytes[0]));
const lo = mem.readIntBig(u16, @ptrCast(*const [2]u8, &uuid.bytes[6])) & 0xfff;
return @as(u60, hi) << 12 | @as(u60, lo);
}
pub fn create(timestamp: u60, clock: *Clock, random: rand.Random) Uuid {
var uuid: Uuid = Uuid.nil;
const sequence = clock.next(timestamp);
// 60 bits of timestamp
setTimestamp(&uuid, timestamp);
// 14 bits of clock sequence
mem.writeIntBig(u16, @ptrCast(*[2]u8, &uuid.bytes[8]), sequence);
// 48 bits of entropy
random.bytes(uuid.bytes[10..]);
uuid.setVariant(.rfc4122);
uuid.setVersion(6);
return uuid;
}
test "create" {
var rng = rand.DefaultPrng.init(0);
var clock = Clock.init(rng.random());
const uuid1 = create(nanosToTimestamp(time.nanoTimestamp()), &clock, rng.random());
const uuid2 = create(nanosToTimestamp(time.nanoTimestamp()), &clock, rng.random());
log.debug("{}\n{}\n", .{ uuid1, uuid2 });
try testing.expect(!mem.eql(u8, &uuid1.bytes, &uuid2.bytes));
}
pub const Source = struct {
clock: *Clock,
random: rand.Random,
pub fn init(clock: *Clock, random: rand.Random) Source {
return .{
.clock = clock,
.random = random,
};
}
pub fn create(self: Source) Uuid {
const nanos = time.nanoTimestamp();
const timestamp = nanosToTimestamp(nanos);
return v6.create(timestamp, self.clock, self.random);
}
};
test "Source" {
var rng = rand.DefaultPrng.init(0);
var clock = Clock.init(rng.random());
const source = Source.init(&clock, rng.random());
const uuid1 = source.create();
const uuid2 = source.create();
log.debug("{}\n{}\n", .{ uuid1, uuid2 });
try testing.expect(!std.mem.eql(u8, &uuid1.bytes, &uuid2.bytes));
}
pub fn fromV1(uuidV1: Uuid) Uuid {
var uuidV6 = uuidV1;
setTimestamp(&uuidV6, v1.getTimestamp(uuidV1));
uuidV6.setVersion(6);
return uuidV6;
}
test "fromV6" {
var rng = rand.DefaultPrng.init(0);
var clock = Clock.init(rng.random());
const source = v1.Source.init(&clock, v1.randomNode(rng.random()));
const uuidV1 = source.create();
const uuidV6 = fromV1(uuidV1);
try testing.expectEqual(uuidV1.getVariant(), uuidV6.getVariant());
try testing.expectEqual(@as(u4, 6), uuidV6.getVersion());
try testing.expectEqualSlices(u8, uuidV1.bytes[10..], uuidV6.bytes[10..]);
}
};
/// A UUIDv7 is created from a timestamp and entropy source, with arbitrary subsecond precision.
/// This implementation uses 30 bits for nanosecond precision, 8 bits for the clock sequence (overflowing into nanoseconds), and the remaining 48 bits for entropy.
pub const v7 = struct {
// v7 allocates:
// - 36 bits for Unix seconds
// - 24 bits for subsecond precision
// - 62 bits for subseconds, clock sequence, or entropy
// Zig has timestamp precision down to the ns; 1s = 1e9ns, so ns can be represented in 30 bits, leaving 56 bits for clock sequence and entropy.
// Let's use 8 bits for clock sequence (256 UUIDs/ns), leaving 48 bits for entropy, matching v1 & v6.
// The clock can overflow into ns, because if we're exceeding 256 UUIDs/ns, our clock probably isn't actually ns-precise - especially considering the simple UUIDv4 takes ~16ns to generate.
var clock = struct {
const Self = @This();
mutex: std.Thread.Mutex = .{},
nanos: i128 = 0,
sequence: u8 = 0,
fn next(self: *Self, nanos: *i128) u8 {
self.mutex.lock();
defer self.mutex.unlock();
if (nanos.* < self.nanos) {
nanos.* = self.nanos;
} else if (nanos.* > self.nanos) {
self.sequence = 0;
self.nanos = nanos.*;
}
const sequence = self.sequence;
if (@addWithOverflow(u8, self.sequence, 1, &self.sequence)) {
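// The 8-bit sequence wrapped: borrow a nanosecond so UUIDs from this burst keep sorting after earlier ones.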
self.nanos += 1;
}
return sequence;
}
}{};
/// 2^30 is the smallest power of two that can represent 1e9 distinct values, the number of nanoseconds in a second
const subsec_decimal_to_binary = @as(f64, 1 << 30);
pub fn create(nanos: i128, random: rand.Random) Uuid {
var v_nanos = nanos;
const sequence = clock.next(&v_nanos); // Get the clock sequence first in case it causes a nanosecond increment.
const secs = @truncate(u36, @bitCast(u128, @divTrunc(v_nanos, time.ns_per_s)));
const sub_dec = @intToFloat(f64, @mod(v_nanos, time.ns_per_s)) / time.ns_per_s;
const sub = @floatToInt(u30, sub_dec * subsec_decimal_to_binary);
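// `sub` is the subsecond fraction in 30-bit fixed point: sub / 2^30 ≈ (nanoseconds within the second) / 1e9.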
var uuid: Uuid = Uuid.nil;
// 36 bits of Unix seconds
mem.writeIntBig(u32, @ptrCast(*[4]u8, &uuid.bytes[0]), @truncate(u32, secs >> 4));
uuid.bytes[4] = @truncate(u8, secs << 4);
// 12 bits of nanoseconds
uuid.bytes[4] |= @truncate(u8, sub >> 26); // OR, not assign: keep the seconds nibble written above
uuid.bytes[5] = @truncate(u8, sub >> 18);
// 12 bits of nanoseconds
uuid.bytes[6] = @truncate(u8, sub >> 14);
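// (The top 4 bits written to bytes[6] duplicate bits already stored in bytes[5] and are overwritten by setVersion() below.)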
uuid.bytes[7] = @truncate(u8, sub >> 6);
// 6 bits of nanoseconds
uuid.bytes[8] = @truncate(u6, sub);
// 8 bits of clock sequence
uuid.bytes[9] = sequence;
// 48 bits of entropy
random.bytes(uuid.bytes[10..]);
uuid.setVariant(.rfc4122);
uuid.setVersion(7);
return uuid;
}
test "create" {
var rng = rand.DefaultPrng.init(0);
const uuid1 = create(time.nanoTimestamp(), rng.random());
const uuid2 = create(time.nanoTimestamp(), rng.random());
log.debug("{}\n{}\n", .{ uuid1, uuid2 });
try testing.expect(!mem.eql(u8, &uuid1.bytes, &uuid2.bytes));
}
pub const Source = struct {
random: rand.Random,
pub fn init(random: rand.Random) Source {
return .{ .random = random };
}
pub fn create(self: Source) Uuid {
return v7.create(time.nanoTimestamp(), self.random);
}
};
test "Source" {
var rng = rand.DefaultPrng.init(0);
const source = Source.init(rng.random());
const uuid1 = source.create();
const uuid2 = source.create();
log.debug("{}\n{}\n", .{ uuid1, uuid2 });
try testing.expect(!mem.eql(u8, &uuid1.bytes, &uuid2.bytes));
}
};
test "" {
std.testing.refAllDecls(Uuid);
} | src/Uuid.zig |
const xcb = @import("../xcb.zig");
pub const id = xcb.Extension{ .name = "DPMS", .global_id = 0 };
/// @brief GetVersioncookie
pub const GetVersioncookie = struct {
sequence: c_uint,
};
/// @brief GetVersionRequest
pub const GetVersionRequest = struct {
@"major_opcode": u8,
@"minor_opcode": u8 = 0,
@"length": u16,
@"client_major_version": u16,
@"client_minor_version": u16,
};
/// @brief GetVersionReply
pub const GetVersionReply = struct {
@"response_type": u8,
@"pad0": u8,
@"sequence": u16,
@"length": u32,
@"server_major_version": u16,
@"server_minor_version": u16,
};
/// @brief Capablecookie
pub const Capablecookie = struct {
sequence: c_uint,
};
/// @brief CapableRequest
pub const CapableRequest = struct {
@"major_opcode": u8,
@"minor_opcode": u8 = 1,
@"length": u16,
};
/// @brief CapableReply
pub const CapableReply = struct {
@"response_type": u8,
@"pad0": u8,
@"sequence": u16,
@"length": u32,
@"capable": u8,
@"pad1": [23]u8,
};
/// @brief GetTimeoutscookie
pub const GetTimeoutscookie = struct {
sequence: c_uint,
};
/// @brief GetTimeoutsRequest
pub const GetTimeoutsRequest = struct {
@"major_opcode": u8,
@"minor_opcode": u8 = 2,
@"length": u16,
};
/// @brief GetTimeoutsReply
pub const GetTimeoutsReply = struct {
@"response_type": u8,
@"pad0": u8,
@"sequence": u16,
@"length": u32,
@"standby_timeout": u16,
@"suspend_timeout": u16,
@"off_timeout": u16,
@"pad1": [18]u8,
};
/// @brief SetTimeoutsRequest
pub const SetTimeoutsRequest = struct {
@"major_opcode": u8,
@"minor_opcode": u8 = 3,
@"length": u16,
@"standby_timeout": u16,
@"suspend_timeout": u16,
@"off_timeout": u16,
};
/// @brief EnableRequest
pub const EnableRequest = struct {
@"major_opcode": u8,
@"minor_opcode": u8 = 4,
@"length": u16,
};
/// @brief DisableRequest
pub const DisableRequest = struct {
@"major_opcode": u8,
@"minor_opcode": u8 = 5,
@"length": u16,
};
pub const DPMSMode = extern enum(c_uint) {
@"On" = 0,
@"Standby" = 1,
@"Suspend" = 2,
@"Off" = 3,
};
/// @brief ForceLevelRequest
pub const ForceLevelRequest = struct {
@"major_opcode": u8,
@"minor_opcode": u8 = 6,
@"length": u16,
@"power_level": u16,
};
/// @brief Infocookie
pub const Infocookie = struct {
sequence: c_uint,
};
/// @brief InfoRequest
pub const InfoRequest = struct {
@"major_opcode": u8,
@"minor_opcode": u8 = 7,
@"length": u16,
};
/// @brief InfoReply
pub const InfoReply = struct {
@"response_type": u8,
@"pad0": u8,
@"sequence": u16,
@"length": u32,
@"power_level": u16,
@"state": u8,
@"pad1": [21]u8,
};
test "" {
@import("std").testing.refAllDecls(@This());
} | src/auto/dpms.zig |
const Self = @This();
const std = @import("std");
const mem = std.mem;
const wlr = @import("wlroots");
const wayland = @import("wayland");
const wl = wayland.server.wl;
const river = wayland.server.river;
const server = &@import("main.zig").server;
const util = @import("util.zig");
const Box = @import("Box.zig");
const Server = @import("Server.zig");
const Output = @import("Output.zig");
const View = @import("View.zig");
const ViewStack = @import("view_stack.zig").ViewStack;
const LayoutDemand = @import("LayoutDemand.zig");
const log = std.log.scoped(.layout);
layout: *river.LayoutV3,
namespace: []const u8,
output: *Output,
pub fn create(client: *wl.Client, version: u32, id: u32, output: *Output, namespace: []const u8) !void {
const layout = try river.LayoutV3.create(client, version, id);
if (namespaceInUse(namespace, output, client)) {
layout.sendNamespaceInUse();
layout.setHandler(?*c_void, handleRequestInert, null, null);
return;
}
const node = try util.gpa.create(std.TailQueue(Self).Node);
errdefer util.gpa.destroy(node);
node.data = .{
.layout = layout,
.namespace = try util.gpa.dupe(u8, namespace),
.output = output,
};
output.layouts.append(node);
layout.setHandler(*Self, handleRequest, handleDestroy, &node.data);
// If the namespace matches that of the output, set the layout as
// the active one of the output and arrange it.
if (mem.eql(u8, namespace, output.layoutNamespace())) {
output.pending.layout = &node.data;
output.arrangeViews();
}
}
/// Returns true if the given namespace is already in use on the given output
/// or on another output by a different client.
fn namespaceInUse(namespace: []const u8, output: *Output, client: *wl.Client) bool {
var output_it = server.root.outputs.first;
while (output_it) |output_node| : (output_it = output_node.next) {
var layout_it = output_node.data.layouts.first;
if (output_node.data.wlr_output == output.wlr_output) {
// On this output, no other layout can have our namespace.
while (layout_it) |layout_node| : (layout_it = layout_node.next) {
if (mem.eql(u8, namespace, layout_node.data.namespace)) return true;
}
} else {
// Layouts on other outputs may share the namespace, if they come from the same client.
while (layout_it) |layout_node| : (layout_it = layout_node.next) {
if (mem.eql(u8, namespace, layout_node.data.namespace) and
client != layout_node.data.layout.getClient()) return true;
}
}
}
return false;
}
/// This exists to handle layouts that have been rendered inert (due to the
/// namespace already being in use) until the client destroys them.
fn handleRequestInert(layout: *river.LayoutV3, request: river.LayoutV3.Request, _: ?*c_void) void {
if (request == .destroy) layout.destroy();
}
/// Send a layout demand to the client
pub fn startLayoutDemand(self: *Self, views: u32) void {
log.debug(
"starting layout demand '{s}' on output '{s}'",
.{ self.namespace, mem.sliceTo(&self.output.wlr_output.name, 0) },
);
std.debug.assert(self.output.layout_demand == null);
self.output.layout_demand = LayoutDemand.init(self, views) catch {
log.err("failed starting layout demand", .{});
return;
};
self.layout.sendLayoutDemand(
views,
self.output.usable_box.width,
self.output.usable_box.height,
self.output.pending.tags,
self.output.layout_demand.?.serial,
);
server.root.trackLayoutDemands();
}
fn handleRequest(layout: *river.LayoutV3, request: river.LayoutV3.Request, self: *Self) void {
switch (request) {
.destroy => layout.destroy(),
// We receive this event when the client wants to push a view dimension proposal
// to the layout demand matching the serial.
.push_view_dimensions => |req| {
log.debug(
"layout '{s}' on output '{s}' pushed view dimensions: {} {} {} {}",
.{ self.namespace, mem.sliceTo(&self.output.wlr_output.name, 0), req.x, req.y, req.width, req.height },
);
if (self.output.layout_demand) |*layout_demand| {
// We can't raise a protocol error when the serial is old/wrong
// because we do not keep track of old serials server-side.
// Therefore, simply ignore requests with old/wrong serials.
if (layout_demand.serial != req.serial) return;
layout_demand.pushViewDimensions(self.output, req.x, req.y, req.width, req.height);
}
},
// We receive this event when the client wants to mark the proposed layout
// of the layout demand matching the serial as done.
.commit => |req| {
log.debug(
"layout '{s}' on output '{s}' commited",
.{ self.namespace, mem.sliceTo(&self.output.wlr_output.name, 0) },
);
if (self.output.layout_demand) |*layout_demand| {
// We can't raise a protocol error when the serial is old/wrong
// because we do not keep track of old serials server-side.
// Therefore, simply ignore requests with old/wrong serials.
if (layout_demand.serial == req.serial) layout_demand.apply(self);
}
},
}
}
fn handleDestroy(layout: *river.LayoutV3, self: *Self) void {
self.destroy();
}
pub fn destroy(self: *Self) void {
log.debug(
"destroying layout '{s}' on output '{s}'",
.{ self.namespace, mem.sliceTo(&self.output.wlr_output.name, 0) },
);
// Remove layout from the list
const node = @fieldParentPtr(std.TailQueue(Self).Node, "data", self);
self.output.layouts.remove(node);
// If we are the currently active layout of an output, clean up.
if (self.output.pending.layout == self) {
self.output.pending.layout = null;
if (self.output.layout_demand) |*layout_demand| {
layout_demand.deinit();
self.output.layout_demand = null;
server.root.notifyLayoutDemandDone();
}
}
self.layout.setHandler(?*c_void, handleRequestInert, null, null);
util.gpa.free(self.namespace);
util.gpa.destroy(node);
} | source/river-0.1.0/river/Layout.zig |
const std = @import("std");
const warn = std.debug.warn;
const fmt = std.fmt;
const Allocator = std.mem.Allocator;
const pngShader = @import("pngShader.zig");
const SsaaShader = @import("ssaashader.zig").SsaaShader;
pub const ShaderConfig = @import("shaderconfig.zig").ShaderConfig;
const resolutions = @import("resolutions.zig");
pub const Resolutions = resolutions.Resolutions;
const Res = resolutions.Res;
pub fn render(comptime config: RenderConfig) !void {
const simpleConfig = comptime config.simpleConfig();
try simpleConfig.render();
}
pub const RenderConfig = struct {
Shader: type,
res: Res,
ssaa: usize = 3,
frames: usize = 1,
memoryLimitMiB: usize = 64,
preview: bool = false,
preview_samples: usize,
preview_ssaa: usize = 1,
path: []const u8,
frameTemplate: []const u8,
const Self = @This();
fn simpleConfig(comptime self: Self) SimpleConfig {
const ssaa = if (self.preview) self.preview_ssaa else self.ssaa;
return .{
.Shader = self.Shader,
.res = if (self.preview) self.res.limitPixels(self.preview_samples / ssaa) else self.res,
.ssaa = ssaa,
.frames = self.frames,
.memoryLimitMiB = self.memoryLimitMiB,
.path = self.path,
.frameTemplate = self.frameTemplate,
};
}
};
pub const SimpleConfig = struct {
Shader: type,
res: Res,
ssaa: usize,
frames: usize,
memoryLimitMiB: usize,
path: []const u8,
frameTemplate: []const u8,
const Self = @This();
fn render(comptime self: Self) !void {
warn("▶ {}×{}×{}×{}\n", .{ self.res.width, self.res.height, self.ssaa, self.frames });
defer warn("■\n", .{});
errdefer warn("!", .{});
var frameNo: usize = 0;
while (frameNo < self.frames) : (frameNo += 1) {
var gpa: std.heap.GeneralPurposeAllocator(.{ .enable_memory_limit = true, .verbose_log = false }) = .{};
defer _ = gpa.deinit();
gpa.setRequestedMemoryLimit(self.memoryLimitMiB * 1024 * 1024);
const allocator = &gpa.allocator;
const fileName: [:0]u8 = if (self.frames == 1) try fmt.allocPrintZ(allocator, self.path, .{}) else try fmt.allocPrintZ(allocator, self.frameTemplate, .{frameNo});
defer allocator.free(fileName);
warn("{s}\n", .{fileName});
var timer = try std.time.Timer.start();
const shaderConfig = ShaderConfig{
.res = comptime self.res.scale(self.ssaa),
.frameNo = frameNo,
.time = @intToFloat(f64, frameNo) / @intToFloat(f64, self.frames),
};
const shaderContext = try SsaaShader(@intToFloat(f64, self.res.width), @intToFloat(f64, self.res.height), self.ssaa, self.Shader).init(allocator, shaderConfig);
defer shaderContext.deinit(allocator);
const shaderFn = @TypeOf(shaderContext).shade;
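// Timer.lap() returns nanoseconds; multiplying by ms_per_s before dividing by
// ns_per_s converts to whole milliseconds without losing integer precision.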
warn(" init: {}ms\n", .{(timer.lap() * std.time.ms_per_s) / std.time.ns_per_s});
try pngShader.fileRender(fileName, &shaderContext, shaderFn, self.res.width, self.res.height, allocator, gpa);
warn(" render: {}ms\n", .{(timer.lap() * std.time.ms_per_s) / std.time.ns_per_s});
}
}
}; | lib/renderer.zig |
const std = @import("std");
const uefi = std.os.uefi;
const elf = std.elf;
const dcommon = @import("common/dcommon.zig");
pub fn halt() noreturn {
asm volatile (
\\ csrci mstatus, 1
\\0: wfi
\\ j 0b
);
unreachable;
}
pub fn transfer(entry_data: *dcommon.EntryData, uart_base: u64, adjusted_entry: u64) callconv(.Inline) noreturn {
// Supervisor mode, MMU disabled. (SATP = 0)
asm volatile (
\\ret
:
: [entry_data] "{a0}" (entry_data),
[entry] "{ra}" (adjusted_entry)
: "memory"
);
unreachable;
}
pub fn cleanInvalidateDCacheICache(start: u64, len: u64) callconv(.Inline) void {
// I think this does enough.
asm volatile (
\\fence.i
::: "memory");
}
export fn relocate(ldbase: u64, dyn: [*]elf.Elf64_Dyn) uefi.Status {
// Ported from
// https://source.denx.de/u-boot/u-boot/-/blob/52ba373b7825e9feab8357065155cf43dfe2f4ff/arch/riscv/lib/reloc_riscv_efi.c.
var rel: ?*elf.Elf64_Rela = null;
var relent: usize = 0;
var relsz: usize = 0;
var i: usize = 0;
while (dyn[i].d_tag != elf.DT_NULL) : (i += 1) {
switch (dyn[i].d_tag) {
elf.DT_RELA => rel = @intToPtr(*elf.Elf64_Rela, dyn[i].d_val + ldbase),
elf.DT_RELASZ => relsz = dyn[i].d_val,
elf.DT_RELAENT => relent = dyn[i].d_val,
else => {},
}
}
if (rel == null and relent == 0) {
return .Success;
}
if (rel == null or relent == 0) {
return .LoadError;
}
var relp = rel.?;
while (relsz > 0) {
if (relp.r_type() == 3) {
// R_RISCV_RELATIVE
var addr: *u64 = @intToPtr(*u64, ldbase + relp.r_offset);
if (relp.r_addend > 0) {
addr.* = ldbase + std.math.absCast(relp.r_addend);
} else {
addr.* = ldbase - std.math.absCast(relp.r_addend);
}
} else {
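// Unsupported relocation type: halt hard by jumping to address 0, with the
// relocation details left in t0..t3, presumably so a debugger can inspect them.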
asm volatile (
\\j 0
:
: [r_info] "{t0}" (relp.r_info),
[dyn] "{t1}" (@ptrToInt(dyn)),
[i] "{t2}" (i),
[rel] "{t3}" (@ptrToInt(rel))
: "memory"
);
}
relp = @intToPtr(*elf.Elf64_Rela, @ptrToInt(relp) + relent);
relsz -= relent;
}
return .Success;
}
// For whatever reason, memset and memcpy implementations aren't being
// included, and it's adding a PLT and GOT to have them looked up later. They
// aren't being provided by anyone else, so we must provide them ourselves.
export fn memset(b: *c_void, c: c_int, len: usize) *c_void {
std.mem.set(u8, @ptrCast([*]u8, b)[0..len], @truncate(u8, std.math.absCast(c)));
return b;
}
export fn memcpy(dst: *c_void, src: *const c_void, n: usize) *c_void {
std.mem.copy(u8, @ptrCast([*]u8, dst)[0..n], @ptrCast([*]const u8, src)[0..n]);
return dst;
} | dainboot/src/riscv64.zig |
const std = @import("std");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const assert = std.debug.assert;
/// Asserts at compile time that `T` is an integer, returns `T`
pub fn requireInt(comptime T: type) type {
comptime assert(@typeInfo(T) == .Int);
return T;
}
/// Asserts at compile time that `T` is a signed integer, returns `T`
pub fn requireSignedInt(comptime T: type) type {
_ = requireInt(T);
comptime assert(@typeInfo(T).Int.signedness == .signed);
return T;
}
/// Asserts at compile time that `T` is an unsigned integer, returns `T`
pub fn requireUnsignedInt(comptime T: type) type {
_ = requireInt(T);
comptime assert(@typeInfo(T).Int.signedness == .unsigned);
return T;
}
/// Compute the sign of an integer
/// Returns true if the sign bit of `val` is set, otherwise false
/// https://github.com/cryptocode/bithacks#CopyIntegerSign
pub fn isSignBitSet(val: anytype) bool {
const T = requireSignedInt(@TypeOf(val));
return -(@intCast(T, @boolToInt(val < 0))) == -1;
}
test "Compute the sign of an integer" {
var cases = [5]i32{ std.math.minInt(i32), -1, 0, 1, std.math.maxInt(i32) };
var expected = [5]bool{ true, true, false, false, false };
for (cases) |num, i| {
try expect(isSignBitSet(num) == expected[i]);
}
}
/// Detect if two integers have opposite signs
/// Returns true if the `first` and `second` signed integers have opposite signs
/// https://github.com/cryptocode/bithacks#detect-if-two-integers-have-opposite-signs
pub fn isOppositeSign(first: anytype, second: @TypeOf(first)) bool {
_ = requireSignedInt(@TypeOf(first));
return (first ^ second) < 0;
}
test "Detect if two integers have opposite signs" {
try expect(isOppositeSign(@as(i32, -1), @as(i32, 1)));
try expect(!isOppositeSign(@as(i32, 1), @as(i32, 1)));
}
/// Compute the integer absolute value (abs) without branching
/// https://github.com/cryptocode/bithacks#compute-the-integer-absolute-value-abs-without-branching
pub fn absFast(val: anytype) @TypeOf(val) {
const T = requireSignedInt(@TypeOf(val));
const bits = @typeInfo(T).Int.bits;
const mask: T = val >> (bits - 1);
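// Arithmetic right shift smears the sign bit: mask is 0 for non-negative val
// and all ones (-1) for negative val, so (val + mask) ^ mask yields
// val or ~(val - 1) == -val without a branch.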
return (val + mask) ^ mask;
}
test "Compute the integer absolute value (abs) without branching" {
var cases = [5]i32{ std.math.minInt(i32) + 1, -1, 0, 1, std.math.maxInt(i32) };
var expected = [5]i32{ std.math.maxInt(i32), 1, 0, 1, std.math.maxInt(i32) };
for (cases) |num, i| {
try expect(absFast(num) == expected[i]);
}
}
/// Find the minimum of two integers without branching
/// https://github.com/cryptocode/bithacks#compute-the-minimum-min-or-maximum-max-of-two-integers-without-branching
pub fn minFast(x: anytype, y: @TypeOf(x)) @TypeOf(x) {
_ = requireSignedInt(@TypeOf(x));
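// -@boolToInt(x < y) is all ones when x < y and 0 otherwise, so the mask
// selects x or y respectively without a branch.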
return y ^ ((x ^ y) & -@intCast(@TypeOf(x), @boolToInt((x < y))));
}
/// Find the maximum of two signed integers without branching
/// https://github.com/cryptocode/bithacks#compute-the-minimum-min-or-maximum-max-of-two-integers-without-branching
pub fn maxFast(x: anytype, y: @TypeOf(x)) @TypeOf(x) {
_ = requireSignedInt(@TypeOf(x));
return x ^ ((x ^ y) & -@intCast(@TypeOf(x), @boolToInt((x < y))));
}
test "Compute the minimum (min) or maximum (max) of two integers without branching" {
const x: i32 = 19;
const y: i32 = -13;
try expect(minFast(x, y) == y);
try expect(maxFast(x, y) == x);
}
/// Determining if an integer is a power of 2
/// Generic function that checks if the input integer is a power of 2. If the input
/// is a signed integer, the generated function will include a call to absFast()
/// https://github.com/cryptocode/bithacks#determining-if-an-integer-is-a-power-of-2
pub fn isPowerOf2(val: anytype) bool {
const T = @TypeOf(val);
const abs = if (@typeInfo(T) == .Int and @typeInfo(T).Int.signedness == .signed) absFast(val) else val;
return abs != 0 and (abs & (abs - 1)) == 0;
}
test "Determining if an integer is a power of 2" {
try expect(isPowerOf2(@as(i32, -64)));
try expect(!isPowerOf2(@as(i32, -63)));
try expect(isPowerOf2(@as(u32, 64)));
try expect(!isPowerOf2(@as(u32, 63)));
try expect(!isPowerOf2(@as(u32, 0)));
}
/// Sign extending from a constant bit-width
/// Input `val` is an unsigned n-bit numbers that is reinterpreted as a signed integer which
/// is then signed-extended to the `target` type and returned.
/// https://github.com/cryptocode/bithacks#sign-extending-from-a-constant-bit-width
pub fn signExtendFixed(comptime target: type, val: anytype) target {
const T = requireUnsignedInt(@TypeOf(val));
const SignedType = std.meta.Int(.signed, @typeInfo(T).Int.bits);
return @bitCast(SignedType, val);
}
test "Sign extending from a constant bit-width2" {
// Input is -3 in 4-bit two's complement representation, which we sign extend to an i16
try expectEqual(signExtendFixed(i16, @as(u4, 0b1101)), -3);
try expectEqual(signExtendFixed(i16, @as(u5, 0b10000)), -16);
}
/// Sign extending from a variable bit-width
/// The `val` argument is an integer with size >= `bits`, but only `bits` number of bits actually
/// represents the number to be sign-extended to the `target` type.
/// https://github.com/cryptocode/bithacks#sign-extending-from-a-variable-bit-width
pub fn signExtendVariable(comptime target: type, comptime bits: usize, val: anytype) target {
const T = requireSignedInt(@TypeOf(val));
const val_bits = @typeInfo(T).Int.bits;
comptime assert(val_bits >= bits);
// Only necessary if any bits above `bits` are non-zero
const only_relevant_bits = val & ((@as(usize, 1) << bits) - 1);
const diff: usize = val_bits - bits;
return (only_relevant_bits << diff) >> diff;
}
test "Sign extending from a variable bit-width" {
// Input is 0b10110110, but we only care about the lower 3 bits which we sign extend into an i16
const res = signExtendVariable(i16, 3, @bitCast(i8, @as(u8, 0b10110110)));
try expect(res == -2);
}
/// Conditionally set or clear bits without branching
/// https://github.com/cryptocode/bithacks#conditionally-set-or-clear-bits-without-branching
pub fn setOrClearBits(set: bool, mask: anytype, val: anytype) @TypeOf(val) {
_ = requireInt(@TypeOf(mask));
const T = requireInt(@TypeOf(val));
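// -%@boolToInt(set) is all ones when `set` is true and 0 when false; bits
// under `mask` are taken from that word, the rest keep their value from `val`.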
return (val & ~mask) | (-%@as(T, @boolToInt(set)) & mask);
}
test "Conditionally set or clear bits without branching" {
const mask: u8 = 0b10110010;
const bits: u8 = 0b01000011;
var res = setOrClearBits(true, mask, bits);
try expect(res == 0b11110011);
res = setOrClearBits(false, mask, bits);
try expectEqual(res, 0b01000001);
}
/// Conditionally negate a value without branching
/// https://github.com/cryptocode/bithacks#conditionally-negate-a-value-without-branching
pub fn negateIf(negate: bool, val: anytype) @TypeOf(val) {
const T = requireSignedInt(@TypeOf(val));
const negate_as_int = @as(T, @boolToInt(negate));
return (val ^ -negate_as_int) + negate_as_int;
}
test "Conditionally negate a value without branching" {
try expectEqual(negateIf(true, @as(i32, 50)), -50);
try expectEqual(negateIf(false, @as(i32, 50)), 50);
}
/// Merge bits from two values according to a mask"
/// https://github.com/cryptocode/bithacks#merge-bits-from-two-values-according-to-a-mask
pub fn mergeBits(first: anytype, second: @TypeOf(first), mask: @TypeOf(first)) @TypeOf(first) {
_ = requireUnsignedInt(@TypeOf(first));
return first ^ ((first ^ second) & mask);
}
test "Merge bits from two values according to a mask" {
const a: u8 = 0b10110010;
const b: u8 = 0b00001101;
// 1 = which bits to pick from a
// 0 = which bits to pick from b
const m: u8 = 0b00001111;
try expectEqual(mergeBits(a, b, m), 0b10111101);
}
/// Counting bits set (naive way)
/// https://github.com/cryptocode/bithacks#counting-bits-set-naive-way
pub fn countBitsSetNaive(val: anytype) usize {
const T = requireInt(@TypeOf(val));
var v = val;
var bits_set: T = 0;
while (v != 0) : (v >>= 1) {
bits_set +%= v & 1;
}
return @intCast(usize, bits_set);
}
test "Counting bits set (naive way)" {
try expectEqual(countBitsSetNaive(@as(u8, 0b0)), 0);
try expectEqual(countBitsSetNaive(@as(u8, 0b11100011)), 5);
try expectEqual(countBitsSetNaive(@as(u8, 0b11111111)), 8);
try expectEqual(countBitsSetNaive(@as(i8, 0b1111111)), 7);
try expectEqual(countBitsSetNaive(@as(u32, 0xffffffff)), 32);
try expectEqual(countBitsSetNaive(@as(u64, 0xffffffffffffffff)), 64);
}
/// Counting bits set by lookup table
/// https://github.com/cryptocode/bithacks#counting-bits-set-by-lookup-table
pub fn countBitsByLookupTable(val: u32) usize {
// Generate the lookup table at compile time
const bitSetTable = comptime val: {
var table: [256]u8 = undefined;
table[0] = 0;
var i: usize = 0;
while (i < 256) : (i += 1) {
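// popcount(i) == (i & 1) + popcount(i >> 1)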
table[i] = (i & 1) + table[i / 2];
}
break :val table;
};
return bitSetTable[val & 0xff] +
bitSetTable[(val >> 8) & 0xff] +
bitSetTable[(val >> 16) & 0xff] +
bitSetTable[val >> 24];
}
test "Counting bits set by lookup table" {
try expectEqual(countBitsByLookupTable(0b0), 0);
try expectEqual(countBitsByLookupTable(0b11100011), 5);
try expectEqual(countBitsByLookupTable(0b1111111), 7);
try expectEqual(countBitsByLookupTable(0b11111111), 8);
try expectEqual(countBitsByLookupTable(0xffffffff), 32);
}
/// Counting bits set, Brian Kernighan's way
/// https://github.com/cryptocode/bithacks#counting-bits-set-brian-kernighans-way
pub fn countBitsSetKernighan(val: anytype) usize {
_ = requireInt(@TypeOf(val));
var v = val;
var bits_set: usize = 0;
while (v != 0) : (bits_set += 1) {
v &= v - 1;
}
return @truncate(usize, bits_set);
}
test "Counting bits set, Brian Kernighan's way" {
try expectEqual(countBitsSetKernighan(@as(u8, 0b0)), 0);
try expectEqual(countBitsSetKernighan(@as(u8, 0b11100011)), 5);
try expectEqual(countBitsSetKernighan(@as(u8, 0b11111111)), 8);
try expectEqual(countBitsSetKernighan(@as(i8, 0b1111111)), 7);
try expectEqual(countBitsSetKernighan(@as(u32, 0xffffffff)), 32);
try expectEqual(countBitsSetKernighan(@as(u64, 0xffffffffffffffff)), 64);
}
/// Counting bits set in 14, 24, or 32-bit words using 64-bit instructions
/// https://github.com/cryptocode/bithacks#counting-bits-set-in-14-24-or-32-bit-words-using-64-bit-instructions
pub fn countBitsSetModulus(val: anytype) usize {
const T = requireInt(@TypeOf(val));
var bits_set: u64 = switch (@typeInfo(T).Int.bits) {
14 => (val * @as(u64, 0x200040008001) & @as(u64, 0x111111111111111)) % 0xf,
24 => res: {
var c: u64 = ((@intCast(u64, val) & 0xfff) * @as(u64, 0x1001001001001) & @as(u64, 0x84210842108421)) % 0x1f;
c += (((@intCast(u64, val) & 0xfff000) >> 12) * @as(u64, 0x1001001001001) & @as(u64, 0x84210842108421)) % 0x1f;
break :res c;
},
32 => res: {
var c: u64 = ((val & 0xfff) * @as(u64, 0x1001001001001) & @as(u64, 0x84210842108421)) % 0x1f;
c += (((val & 0xfff000) >> 12) * @as(u64, 0x1001001001001) & @as(u64, 0x84210842108421)) % 0x1f;
c += ((val >> 24) * @as(u64, 0x1001001001001) & @as(u64, 0x84210842108421)) % 0x1f;
break :res c;
},
else => @panic("Invalid integer size"),
};
return @truncate(usize, bits_set);
}
test "Counting bits set in 14, 24, or 32-bit words using 64-bit instructions" {
try expectEqual(countBitsSetModulus(@as(u14, 0b11111111111110)), 13);
try expectEqual(countBitsSetModulus(@as(u14, 0b11111111111111)), 14);
try expectEqual(countBitsSetModulus(@as(u24, 0b111111111111111111111110)), 23);
try expectEqual(countBitsSetModulus(@as(u24, 0b111111111111111111111111)), 24);
try expectEqual(countBitsSetModulus(@as(u32, 0b0)), 0);
try expectEqual(countBitsSetModulus(@as(u32, 0b11100011)), 5);
try expectEqual(countBitsSetModulus(@as(u32, 0b11111111)), 8);
try expectEqual(countBitsSetModulus(@as(u32, 0xfffffffe)), 31);
try expectEqual(countBitsSetModulus(@as(u32, 0xffffffff)), 32);
}
/// Counting bits set, in parallel
/// https://github.com/cryptocode/bithacks#counting-bits-set-in-parallel
pub fn countBitsSetParallel(val: anytype) @TypeOf(val) {
const T = requireUnsignedInt(@TypeOf(val));
var v = val;
var bits_set: T = 0;
const ones = ~@as(T, 0);
switch (@typeInfo(T).Int.bits) {
// Method optimized for 32 bit integers
32 => {
v = v - ((v >> 1) & 0x55555555);
v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
bits_set = ((v + (v >> 4) & 0xF0F0F0F) *% 0x1010101) >> 24;
},
// Generalized version for integers up to 128 bits in width
else => |bits| {
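// At any width, ones/3 is 0x5555..., ones/15*3 is 0x3333..., and ones/255*15
// is 0x0f0f...; the final multiply by ones/255 (0x0101...) sums the per-byte
// counts into the most significant byte.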
v = v - ((v >> 1) & @as(T, ones / 3));
v = (v & @as(T, ones / 15 * 3)) + ((v >> 2) & @as(T, ones / 15 * 3));
v = (v + (v >> 4)) & @as(T, ones / 255 * 15);
bits_set = @as(T, (v *% (@as(T, ones / 255))) >> (bits / 8 - 1) * 8);
},
}
return bits_set;
}
test "Counting bits set, in parallel" {
try expectEqual(countBitsSetParallel(@as(u16, 0xfffe)), 15);
try expectEqual(countBitsSetParallel(@as(u16, 0xffff)), 16);
try expectEqual(countBitsSetParallel(@as(u32, 0b0)), 0);
try expectEqual(countBitsSetParallel(@as(u32, 0b11100011)), 5);
try expectEqual(countBitsSetParallel(@as(u32, 0b11111111)), 8);
try expectEqual(countBitsSetParallel(@as(u32, 0xfffffffe)), 31);
try expectEqual(countBitsSetParallel(@as(u32, 0xffffffff)), 32);
try expectEqual(countBitsSetParallel(@as(u64, 0x0)), 0);
try expectEqual(countBitsSetParallel(@as(u64, 0x1)), 1);
try expectEqual(countBitsSetParallel(@as(u64, 0xfffffffffffffffe)), 63);
try expectEqual(countBitsSetParallel(@as(u64, 0xffffffffffffffff)), 64);
try expectEqual(countBitsSetParallel(@as(u128, 0x0)), 0);
try expectEqual(countBitsSetParallel(@as(u128, 0x1)), 1);
try expectEqual(countBitsSetParallel(@as(u128, 0xfffffffffffffffffffffffffffffffe)), 127);
try expectEqual(countBitsSetParallel(@as(u128, 0xffffffffffffffffffffffffffffffff)), 128);
}
/// Count bits set (rank) from the most-significant bit up to a given position
/// Returns the number of set bits among the `pos` most-significant bits of `val`
/// https://github.com/cryptocode/bithacks#count-bits-set-rank-from-the-most-significant-bit-upto-a-given-position
pub fn countBitsRank(val: u64, pos: u64) u64 {
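// `pos` must be in 1..64; pos == 0 would make the u6 cast of (64 -% pos)
// trip a safety check in safe builds.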
const ones = ~@as(u64, 0);
const bits = @as(u64, 64);
// The following finds the rank of a bit, meaning it returns the sum of bits that
// are set to 1 from the most-significant bit down to the bit at the given position.
var r: u64 = val >> @intCast(u6, (bits -% pos));
r = r - ((r >> 1) & ones / 3);
r = (r & ones / 5) + ((r >> 2) & ones / 5);
r = (r +% (r >> 4)) & ones / 17;
r = (r *% (ones / 255)) >> ((8 - 1) *% 8);
return r;
}
test "Count bits set (rank) from the most-significant bit upto a given position" {
try expectEqual((countBitsRank(0x0, 64)), 0);
try expectEqual((countBitsRank(0x1, 64)), 1);
try expectEqual((countBitsRank(0x1, 1)), 0);
try expectEqual((countBitsRank(0xefffffffffffffff, 7)), 6);
try expectEqual((countBitsRank(0xffffffffffffffff, 64)), 64);
}
/// Select the bit position (from the most-significant bit) with the given count `rank`
/// https://github.com/cryptocode/bithacks#SelectPosFromMSBRank
pub fn bitPosOfRank(val: u64, rank: u64) u64 {
const ones = ~@as(u64, 0);
// Do a normal parallel bit count for a 64-bit integer, but store all intermediate steps:
var a: u64 = val - ((val >> 1) & ones / 3);
var b: u64 = (a & ones / 5) + ((a >> 2) & ones / 5);
var c: u64 = (b +% (b >> 4)) & ones / 0x11;
var d: u64 = (c +% (c >> 8)) & ones / 0x101;
var t: u64 = (d >> 32) + (d >> 48);
var r = rank;
// Now do branchless select:
var s: u64 = 64;
s -%= ((t -% r) & 256) >> 3;
r -%= (t & ((t -% r) >> 8));
t = (d >> @intCast(u6, (s -% @as(u64, 16)))) & 0xff;
s -%= ((t -% r) & 256) >> 4;
r -%= (t & ((t -% r) >> 8));
t = (c >> @intCast(u6, (s -% 8))) & 0xf;
s -%= ((t -% r) & 256) >> 5;
r -%= (t & ((t -% r) >> 8));
t = (b >> @intCast(u6, (s -% 4))) & 0x7;
s -%= ((t -% r) & 256) >> 6;
r -%= (t & ((t -% r) >> 8));
t = (a >> @intCast(u6, (s -% 2))) & 0x3;
s -%= ((t -% r) & 256) >> 7;
r -%= (t & ((t -% r) >> 8));
t = (val >> @intCast(u6, (s -% 1))) & 0x1;
s -%= ((t -% r) & 256) >> 8;
s = 65 -% s;
return s;
}
test "Select the bit position (from the most-significant bit) with the given count (rank)" {
try expectEqual((bitPosOfRank(0xffffffffffffffff, 64)), 64);
try expectEqual((bitPosOfRank(0x00ffffffffffffff, 1)), 9);
}
/// Computing parity the naive way
/// Returns true when an odd number of bits are set in `val`
/// https://github.com/cryptocode/bithacks#computing-parity-the-naive-way
pub fn parityNaive(val: anytype) bool {
_ = requireInt(@TypeOf(val));
var parity = false;
var v = val;
while (v != 0) {
parity = !parity;
v = v & (v - 1);
}
return parity;
}
test "Computing parity the naive way" {
try expect(!parityNaive(@as(u8, 0x0)));
try expect(!parityNaive(@as(u8, 0xf)));
try expect(!parityNaive(@as(u8, 0xff)));
try expect(parityNaive(@as(u8, 0x1)));
try expect(parityNaive(@as(u8, 0x7)));
try expect(parityNaive(@as(u32, 2)));
try expect(parityNaive(@as(u32, 4)));
try expect(parityNaive(@as(u32, 7)));
try expect(!parityNaive(@as(u32, 0)));
try expect(!parityNaive(@as(u32, 3)));
}
/// Compute parity by lookup table
/// Returns true when an odd number of bits are set in `val` which must be an 8-bit or 32-bit unsigned integer.
/// https://github.com/cryptocode/bithacks#compute-parity-by-lookup-table
pub fn parityByLookupTable(val: anytype) bool {
const T = requireUnsignedInt(@TypeOf(val));
comptime assert(@typeInfo(T).Int.bits == 8 or @typeInfo(T).Int.bits == 32);
// Generate the lookup table at compile time which determines if the n'th number has an odd number of bits.
// The table can be viewed as a 16 by 16 bit-matrix generated from a seed following these rules:
// For each row n in [0..15], if the n'th bit in the seed is 0, use the seed as the row,
// otherwise use the inverted seed as the row.
const seed: u16 = 0b0110100110010110;
const parityTable = comptime val: {
var table: [16]u16 = undefined;
var row: usize = 0;
while (row < 16) : (row += 1) {
table[row] = if (seed & (1 << (15 - row)) == 0) seed else ~seed;
}
break :val table;
};
var word = val / 16;
var bit = val % 16;
return 0 != switch (@typeInfo(T).Int.bits) {
8 => parityTable[word] & (@as(u16, 0x8000) >> @intCast(u4, bit)),
32 => res: {
var v = val;
v ^= v >> 16;
v ^= v >> 8;
const index = v & 0xff;
word = index / 16;
bit = index % 16;
break :res parityTable[word] & (@as(u16, 0x8000) >> @intCast(u4, bit));
},
else => @panic("Invalid integer size"),
};
}
test "Compute parity by lookup table" {
try expect(parityByLookupTable(@as(u8, 0x1)));
try expect(parityByLookupTable(@as(u8, 0x7)));
try expect(!parityByLookupTable(@as(u8, 0x0)));
try expect(!parityByLookupTable(@as(u8, 0xf)));
try expect(!parityByLookupTable(@as(u8, 0xff)));
try expect(parityByLookupTable(@as(u32, 2)));
try expect(parityByLookupTable(@as(u32, 4)));
try expect(parityByLookupTable(@as(u32, 7)));
try expect(!parityByLookupTable(@as(u32, 0)));
try expect(!parityByLookupTable(@as(u32, 3)));
}
/// Compute parity of a byte using 64-bit multiply and modulus division
/// https://github.com/cryptocode/bithacks#compute-parity-of-a-byte-using-64-bit-multiply-and-modulus-division
pub fn parityMulMod(val: u8) bool {
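// The multiply copies the byte into all 8 byte lanes; the mask keeps bit i of
// lane i, so each selected bit lands at bit position 9*i. Since 2^9 == 1
// (mod 0x1FF), the modulus sums those bits, yielding the popcount; & 1 takes its parity.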
return 0 != (((val * @as(u64, 0x0101010101010101)) & @as(u64, 0x8040201008040201)) % 0x1FF) & 1;
}
test "Compute parity of a byte using 64-bit multiply and modulus division" {
try expect(!parityMulMod(0x0));
try expect(!parityMulMod(0xf));
try expect(!parityMulMod(0xff));
try expect(parityMulMod(0x1));
try expect(parityMulMod(0x7));
}
/// Compute parity of word with a multiply
/// The input `val` must be a 32 or 64 bit unsigned integer
/// https://github.com/cryptocode/bithacks#compute-parity-of-word-with-a-multiply
pub fn parityMul(val: anytype) bool {
const T = requireUnsignedInt(@TypeOf(val));
comptime assert(@typeInfo(T).Int.bits == 32 or @typeInfo(T).Int.bits == 64);
return 0 != switch (@typeInfo(T).Int.bits) {
32 => res: {
var v = val;
v ^= v >> 1;
v ^= v >> 2;
v = (v & 0x11111111) *% 0x11111111;
break :res (v >> 28) & 1;
},
64 => res: {
var v = val;
v ^= v >> 1;
v ^= v >> 2;
v = (v & 0x1111111111111111) *% 0x1111111111111111;
break :res (v >> 60) & 1;
},
else => @panic("Invalid integer size"),
};
}
test "Compute parity of word with a multiply" {
try expect(parityMul(@as(u32, 2)));
try expect(parityMul(@as(u32, 4)));
try expect(parityMul(@as(u32, 7)));
try expect(!parityMul(@as(u32, 0)));
try expect(!parityMul(@as(u32, 3)));
try expect(!parityMul(@as(u32, 0xffffffff)));
try expect(parityMul(@as(u64, 2)));
try expect(parityMul(@as(u64, 4)));
try expect(parityMul(@as(u64, 7)));
try expect(!parityMul(@as(u64, 0)));
try expect(!parityMul(@as(u64, 3)));
try expect(!parityMul(@as(u64, 0xffffffffffffffff)));
}
/// Compute parity in parallel
/// Works for 32-bit unsigned integers
pub fn parityParallel(val: u32) bool {
var v = val;
v ^= v >> 16;
v ^= v >> 8;
v ^= v >> 4;
v &= 0xf;
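// 0x6996 is a 16-bit parity lookup table in miniature: bit i of 0x6996 is the parity of i.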
return 0 != ((@as(u16, 0x6996) >> @intCast(u4, v)) & 1);
}
test "Compute parity in parallel" {
try expect(parityParallel(2));
try expect(parityParallel(4));
try expect(parityParallel(7));
try expect(!parityParallel(0));
try expect(!parityParallel(3));
try expect(!parityParallel(0xffffffff));
}
/// Swapping values with subtraction and addition
/// https://github.com/cryptocode/bithacks#swapping-values-with-subtraction-and-addition
pub fn swapSubAdd(a: anytype, b: anytype) void {
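// Guard against aliasing: if a and b point at the same value, the first subtraction would zero it.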
if (a != b) {
a.* -%= b.*;
b.* +%= a.*;
a.* = b.* -% a.*;
}
}
test "Swapping values with subtraction and addition" {
var a: u32 = 0x1dfa8ce1;
var b: u32 = 0xffeeddcc;
swapSubAdd(&a, &b);
try expectEqual(a, 0xffeeddcc);
try expectEqual(b, 0x1dfa8ce1);
}
/// Swapping values with XOR
/// https://github.com/cryptocode/bithacks#swapping-values-with-xor
pub fn swapXor(a: anytype, b: anytype) void {
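// Guard against aliasing: XOR-swapping a value with itself would zero it.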
if (a != b) {
a.* ^= b.*;
b.* ^= a.*;
a.* ^= b.*;
}
}
test "Swapping values with XOR" {
var a: u32 = 0x1dfa8ce1;
var b: u32 = 0xffeeddcc;
swapXor(&a, &b);
try expectEqual(a, 0xffeeddcc);
try expectEqual(b, 0x1dfa8ce1);
}
/// Swapping individual bits with XOR
/// https://github.com/cryptocode/bithacks#swapping-individual-bits-with-xor
pub fn swapBitsXor(pos1: usize, pos2: usize, consecutiveBits: usize, val: anytype) @TypeOf(val) {
const T = requireInt(@TypeOf(val));
const shiftType = std.math.Log2Int(T);
var x: T = ((val >> @intCast(shiftType, pos1)) ^ (val >> @intCast(shiftType, pos2))) & ((@as(T, 1) << @intCast(shiftType, consecutiveBits)) - 1);
return val ^ ((x << @intCast(shiftType, pos1)) | (x << @intCast(shiftType, pos2)));
}
test "Swapping individual bits with XOR" {
try expectEqual(swapBitsXor(0, 4, 4, @as(u8, 0b11110000)), 0b00001111);
try expectEqual(swapBitsXor(0, 16, 16, @as(u32, 0xffff0000)), 0x0000ffff);
}
/// Reverse bits the obvious way
/// https://github.com/cryptocode/bithacks#reverse-bits-the-obvious-way
pub fn reverseObvious(val: anytype) @TypeOf(val) {
const T = requireInt(@TypeOf(val));
const bits = @typeInfo(T).Int.bits;
const shiftType = std.math.Log2Int(T);
var finalShiftsNeeded: shiftType = bits - 1;
var v = val >> 1;
var res = val;
while (v != 0) {
res <<= 1;
res |= v & 1;
finalShiftsNeeded -%= 1;
v >>= 1;
}
return (res << finalShiftsNeeded);
}
test "Reverse bits the obvious way" {
try expectEqual(reverseObvious(@as(u8, 0b11010010)), 0b01001011);
try expectEqual(reverseObvious(@as(u8, 0b00000001)), 0b10000000);
try expectEqual(reverseObvious(@as(u32, 0xfffffffe)), 0x7fffffff);
try expectEqual(reverseObvious(@as(u32, 0xffffffff)), 0xffffffff);
try expectEqual(reverseObvious(@as(u32, 0)), 0);
try expectEqual(reverseObvious(@as(u32, 1)), 0x80000000);
try expectEqual(reverseObvious(@as(u64, 0xfffffffffffffffe)), 0x7fffffffffffffff);
}
/// Reverse bits in word by lookup table
/// This is specific to 32-bit unsigned integers
/// https://github.com/cryptocode/bithacks#reverse-bits-in-word-by-lookup-table
pub fn reverseByLookup(val: u32) u32 {
// Generate the lookup table at compile time. This corresponds to the macro-compacted C version.
const reverseTable = comptime val: {
var tblgen = struct {
i: usize = 0,
t: [256]u8 = undefined,
pub fn R2(self: *@This(), n: u8) void {
self.t[self.i + 0] = n;
self.t[self.i + 1] = n + 2 * 64;
self.t[self.i + 2] = n + 1 * 64;
self.t[self.i + 3] = n + 3 * 64;
self.i += 4;
}
pub fn R4(self: *@This(), n: u8) void {
self.R2(n);
self.R2(n + 2 * 16);
self.R2(n + 1 * 16);
self.R2(n + 3 * 16);
}
pub fn R6(self: *@This(), n: u8) void {
self.R4(n);
self.R4(n + 2 * 4);
self.R4(n + 1 * 4);
self.R4(n + 3 * 4);
}
}{};
tblgen.R6(0);
tblgen.R6(2);
tblgen.R6(1);
tblgen.R6(3);
break :val tblgen.t;
};
return (@intCast(u32, reverseTable[val & 0xff]) << 24) |
(@intCast(u32, reverseTable[(val >> 8) & 0xff]) << 16) |
(@intCast(u32, reverseTable[(val >> 16) & 0xff]) << 8) |
(@intCast(u32, reverseTable[(val >> 24) & 0xff]));
}
test "Reverse bits in word by lookup table" {
try expectEqual(reverseByLookup(0xfffffffe), 0x7fffffff);
try expectEqual(reverseByLookup(0xffffffff), 0xffffffff);
try expectEqual(reverseByLookup(0), 0);
try expectEqual(reverseByLookup(1), 0x80000000);
}
/// Reverse the bits in a byte with 3 operations (64-bit multiply and modulus division)
/// https://github.com/cryptocode/bithacks#reverse-the-bits-in-a-byte-with-3-operations-64-bit-multiply-and-modulus-division
pub fn reverseByteMulMod(val: u8) u8 {
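// The multiply fans shifted copies of the byte into a 64-bit word; the mask
// keeps each source bit exactly once, placed so that within its 10-bit group
// it sits at the reversed position. Since 2^10 == 1 (mod 1023), the modulus
// merges the non-overlapping 10-bit groups into the reversed byte.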
return @truncate(u8, (val * @as(u64, 0x0202020202) & @as(u64, 0x010884422010)) % 1023);
}
test "Reverse the bits in a byte with 3 operations (64-bit multiply and modulus division)" {
try expectEqual(reverseByteMulMod(0b11010010), 0b01001011);
try expectEqual(reverseByteMulMod(0b00000001), 0b10000000);
try expectEqual(reverseByteMulMod(0), 0);
}
/// Reverse the bits in a byte with 4 operations (64-bit multiply, no division)
/// https://github.com/cryptocode/bithacks#reverse-the-bits-in-a-byte-with-4-operations-64-bit-multiply-no-division
pub fn reverseByteMulNoDiv(val: u8) u8 {
return @truncate(u8, ((val * @as(u64, 0x80200802)) & @as(u64, 0x0884422110)) *% @as(u64, 0x0101010101) >> 32);
}
test "Reverse the bits in a byte with 4 operations (64-bit multiply, no division)" {
try expectEqual(reverseByteMulNoDiv(0b11010010), 0b01001011);
try expectEqual(reverseByteMulNoDiv(0b00000001), 0b10000000);
try expectEqual(reverseByteMulNoDiv(0), 0);
}
/// Reverse the bits in a byte with 7 operations (no 64-bit)
/// https://github.com/cryptocode/bithacks#reverse-the-bits-in-a-byte-with-7-operations-no-64-bit
pub fn reverseByte7ops(val: u8) u8 {
return @truncate(u8, ((val *% @as(u64, 0x0802) & @as(u64, 0x22110)) |
(val *% @as(u64, 0x8020) & @as(u64, 0x88440))) *% @as(u64, 0x10101) >> 16);
}
test "Reverse the bits in a byte with 7 operations (no 64-bit)" {
try expectEqual(reverseByte7ops(0b11010010), 0b01001011);
try expectEqual(reverseByte7ops(0b00000001), 0b10000000);
try expectEqual(reverseByte7ops(0), 0);
}
/// Reverse an N-bit quantity in parallel in 5 * lg(N) operations
/// https://github.com/cryptocode/bithacks#reverse-an-n-bit-quantity-in-parallel-in-5--lgn-operations
pub fn reverseInLog5steps(val: anytype) @TypeOf(val) {
const T = requireInt(@TypeOf(val));
const bits = @typeInfo(T).Int.bits;
comptime assert(std.math.isPowerOfTwo(bits));
const shiftType = std.math.Log2Int(T);
var v = val;
var s: T = bits >> 1;
var mask = ~@as(T, 0);
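// Each pass swaps adjacent blocks of s bits; the mask update generates
// 0x0000ffff, 0x00ff00ff, 0x0f0f0f0f, 0x33333333, 0x55555555 for 32 bits.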
while (s > 0) : (s >>= 1) {
mask ^= (mask << @intCast(shiftType, s));
v = ((v >> @intCast(shiftType, s)) & mask) | ((v << @intCast(shiftType, s)) & ~mask);
}
return v;
}
test "Reverse an N-bit quantity in parallel in 5 * lg(N) operations" {
try expectEqual(reverseInLog5steps(@as(u32, 0xfffffffe)), 0x7fffffff);
try expectEqual(reverseInLog5steps(@as(u32, 0xffffffff)), 0xffffffff);
try expectEqual(reverseInLog5steps(@as(u32, 0)), 0);
try expectEqual(reverseInLog5steps(@as(u32, 1)), 0x80000000);
try expectEqual(reverseInLog5steps(@as(i32, 1)), -0x80000000);
try expectEqual(reverseInLog5steps(@as(u64, 0xfffffffffffffffe)), 0x7fffffffffffffff);
}
/// Compute modulus division by 1 << s without a division operator
/// Returns `numerator` % (1 << `shiftAmount`), i.e. `numerator` % 2^n
/// https://github.com/cryptocode/bithacks#compute-modulus-division-by-1--s-without-a-division-operator
pub fn modPow2(numerator: anytype, shiftAmount: usize) @TypeOf(numerator) {
const T = requireInt(@TypeOf(numerator));
const shiftType = std.math.Log2Int(T);
const d = @as(T, 1) << @intCast(shiftType, shiftAmount);
return numerator & (d - 1);
}
test "Compute modulus division by 1 << s without a division operator" {
try expectEqual(modPow2(@as(u32, 19), 5), 19);
try expectEqual(modPow2(@as(u32, 258), 8), 2);
try expectEqual(modPow2(@as(i64, 19), 5), 19);
}
/// Compute modulus division by (1 << s) - 1 without a division operator
/// Returns `numerator` % ((1 << `shiftAmount`) - 1)
/// https://github.com/cryptocode/bithacks#compute-modulus-division-by-1--s---1-without-a-division-operator
pub fn modPow2Minus1(numerator: anytype, shiftAmount: usize) @TypeOf(numerator) {
const T = requireInt(@TypeOf(numerator));
const shiftType = std.math.Log2Int(T);
const d = (@as(T, 1) << @intCast(shiftType, shiftAmount)) - 1;
var n = numerator;
var m: T = numerator;
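// n is congruent to the sum of its base-2^s digits (mod 2^s - 1), the binary
// analogue of casting out nines, so folding the digits repeatedly converges
// to the remainder (the final check maps d itself to 0).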
while (n > d) : (n = m) {
m = 0;
while (n != 0) : (n >>= @intCast(shiftType, shiftAmount)) {
m +%= n & d;
}
}
return if (m == d) 0 else m;
}
test "Compute modulus division by (1 << s) - 1 without a division operator" {
try expectEqual(modPow2Minus1(@as(u8, 9), 3), 2);
try expectEqual(modPow2Minus1(@as(u32, 9), 3), 2);
try expectEqual(modPow2Minus1(@as(u32, 19), 3), 5);
try expectEqual(modPow2Minus1(@as(u32, 21), 2), 0);
try expectEqual(modPow2Minus1(@as(u64, 19), 3), 5);
}
/// Compute modulus division by (1 << s) - 1 in parallel without a division operator
/// https://github.com/cryptocode/bithacks#compute-modulus-division-by-1--s---1-in-parallel-without-a-division-operator
pub fn modPow2Minus1NoDiv(numerator: u32, shiftAmount: usize) u32 {
// zig fmt: off
const M: [32]u32 = .{
0x00000000, 0x55555555, 0x33333333, 0xc71c71c7,
0x0f0f0f0f, 0xc1f07c1f, 0x3f03f03f, 0xf01fc07f,
0x00ff00ff, 0x07fc01ff, 0x3ff003ff, 0xffc007ff,
0xff000fff, 0xfc001fff, 0xf0003fff, 0xc0007fff,
0x0000ffff, 0x0001ffff, 0x0003ffff, 0x0007ffff,
0x000fffff, 0x001fffff, 0x003fffff, 0x007fffff,
0x00ffffff, 0x01ffffff, 0x03ffffff, 0x07ffffff,
0x0fffffff, 0x1fffffff, 0x3fffffff, 0x7fffffff
};
const Q: [32][6]u32 = .{
.{ 0, 0, 0, 0, 0, 0}, .{16, 8, 4, 2, 1, 1}, .{16, 8, 4, 2, 2, 2},
.{15, 6, 3, 3, 3, 3}, .{16, 8, 4, 4, 4, 4}, .{15, 5, 5, 5, 5, 5},
.{12, 6, 6, 6 , 6, 6}, .{14, 7, 7, 7, 7, 7}, .{16, 8, 8, 8, 8, 8},
.{ 9, 9, 9, 9, 9, 9}, .{10, 10, 10, 10, 10, 10}, .{11, 11, 11, 11, 11, 11},
.{12, 12, 12, 12, 12, 12}, .{13, 13, 13, 13, 13, 13}, .{14, 14, 14, 14, 14, 14},
.{15, 15, 15, 15, 15, 15}, .{16, 16, 16, 16, 16, 16}, .{17, 17, 17, 17, 17, 17},
.{18, 18, 18, 18, 18, 18}, .{19, 19, 19, 19, 19, 19}, .{20, 20, 20, 20, 20, 20},
.{21, 21, 21, 21, 21, 21}, .{22, 22, 22, 22, 22, 22}, .{23, 23, 23, 23, 23, 23},
.{24, 24, 24, 24, 24, 24}, .{25, 25, 25, 25, 25, 25}, .{26, 26, 26, 26, 26, 26},
.{27, 27, 27, 27, 27, 27}, .{28, 28, 28, 28, 28, 28}, .{29, 29, 29, 29, 29, 29},
.{30, 30, 30, 30, 30, 30}, .{31, 31, 31, 31, 31, 31}
};
const R: [32][6]u32 = .{
.{0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
.{0x0000ffff, 0x000000ff, 0x0000000f, 0x00000003, 0x00000001, 0x00000001},
.{0x0000ffff, 0x000000ff, 0x0000000f, 0x00000003, 0x00000003, 0x00000003},
.{0x00007fff, 0x0000003f, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
.{0x0000ffff, 0x000000ff, 0x0000000f, 0x0000000f, 0x0000000f, 0x0000000f},
.{0x00007fff, 0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f},
.{0x00000fff, 0x0000003f, 0x0000003f, 0x0000003f, 0x0000003f, 0x0000003f},
.{0x00003fff, 0x0000007f, 0x0000007f, 0x0000007f, 0x0000007f, 0x0000007f},
.{0x0000ffff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff},
.{0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff},
.{0x000003ff, 0x000003ff, 0x000003ff, 0x000003ff, 0x000003ff, 0x000003ff},
.{0x000007ff, 0x000007ff, 0x000007ff, 0x000007ff, 0x000007ff, 0x000007ff},
.{0x00000fff, 0x00000fff, 0x00000fff, 0x00000fff, 0x00000fff, 0x00000fff},
.{0x00001fff, 0x00001fff, 0x00001fff, 0x00001fff, 0x00001fff, 0x00001fff},
.{0x00003fff, 0x00003fff, 0x00003fff, 0x00003fff, 0x00003fff, 0x00003fff},
.{0x00007fff, 0x00007fff, 0x00007fff, 0x00007fff, 0x00007fff, 0x00007fff},
.{0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff},
.{0x0001ffff, 0x0001ffff, 0x0001ffff, 0x0001ffff, 0x0001ffff, 0x0001ffff},
.{0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff},
.{0x0007ffff, 0x0007ffff, 0x0007ffff, 0x0007ffff, 0x0007ffff, 0x0007ffff},
.{0x000fffff, 0x000fffff, 0x000fffff, 0x000fffff, 0x000fffff, 0x000fffff},
.{0x001fffff, 0x001fffff, 0x001fffff, 0x001fffff, 0x001fffff, 0x001fffff},
.{0x003fffff, 0x003fffff, 0x003fffff, 0x003fffff, 0x003fffff, 0x003fffff},
.{0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff},
.{0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff},
.{0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff},
.{0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff},
.{0x07ffffff, 0x07ffffff, 0x07ffffff, 0x07ffffff, 0x07ffffff, 0x07ffffff},
.{0x0fffffff, 0x0fffffff, 0x0fffffff, 0x0fffffff, 0x0fffffff, 0x0fffffff},
.{0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff},
.{0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff},
.{0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff}
};
// zig fmt: on
const shiftType = std.math.Log2Int(u32);
const s = shiftAmount;
const d = (@as(u32, 1) << @intCast(shiftType, shiftAmount)) - 1;
var n = numerator;
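// First fold adjacent s-bit digits in parallel via M[s]; the Q/R tables then
// drive further shift-and-mask folds until the sum fits in s bits.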
var m: u32 = (n & M[s]) +% ((n >> @intCast(shiftType, s)) & M[s]);
var q: usize = 0;
var r: usize = 0;
while (m > d) : ({
q += 1;
r += 1;
}) {
m = (m >> @intCast(shiftType, Q[s][q])) +% (m & R[s][r]);
}
return if (m == d) 0 else m;
}
test "Compute modulus division by (1 << s) - 1 in parallel without a division operator" {
try expectEqual(modPow2Minus1NoDiv(9, 3), 2);
try expectEqual(modPow2Minus1NoDiv(19, 3), 5);
try expectEqual(modPow2Minus1NoDiv(21, 2), 0);
}
/// Find the log base 2 of an integer with the MSB N set in O(N) operations (the obvious way)
/// Returns ⌊log2(`val`)⌋, i.e. the position of the highest bit set.
/// https://github.com/cryptocode/bithacks#find-the-log-base-2-of-an-integer-with-the-msb-n-set-in-on-operations-the-obvious-way
pub fn log2floorObvious(val: anytype) @TypeOf(val) {
const T = requireInt(@TypeOf(val));
const shiftType = std.math.Log2Int(T);
var v: T = val;
var r: T = 0;
while (true) {
v >>= @intCast(shiftType, 1);
if (v == 0) break;
r +%= 1;
}
return r;
}
test "Find the log base 2 of an integer with the MSB N set in O(N) operations (the obvious way)" {
try expectEqual(log2floorObvious(@as(u8, 127)), 6);
try expectEqual(log2floorObvious(@as(u32, 0)), 0);
try expectEqual(log2floorObvious(@as(u32, 1)), 0);
try expectEqual(log2floorObvious(@as(u32, 2)), 1);
try expectEqual(log2floorObvious(@as(u32, 127)), 6);
try expectEqual(log2floorObvious(@as(u32, 128)), 7);
try expectEqual(log2floorObvious(@as(u32, 0xffffffff)), 31);
try expectEqual(log2floorObvious(@as(u64, 0xffffffffffffffff)), 63);
}
/// Find the integer log base 2 of an integer with an 64-bit IEEE float
/// Returns ⌊log2(`val`)⌋, i.e. the position of the highest bit set.
/// An improvement over the original is that 0 as input returns 0, and is thus consistent with `log2floorObvious`
/// https://github.com/cryptocode/bithacks#find-the-integer-log-base-2-of-an-integer-with-an-64-bit-ieee-float
pub fn log2usingFloat(val: u32) u32 {
const endian = @import("builtin").target.cpu.arch.endian();
const little_endian: bool = switch (endian) {
.Little => true,
.Big => false,
};
const U = extern union {
u: [2]u32,
d: f64,
};
if (val > 0) {
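// The high word 0x43300000 makes conv.d the double 2^52 + val (val sits in
// the low mantissa bits). Subtracting 2^52 renormalizes, so the biased
// exponent field (high word >> 20) becomes 1023 + floor(log2(val)).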
var conv: U = undefined;
conv.u[@boolToInt(little_endian)] = 0x43300000;
conv.u[@boolToInt(!little_endian)] = val;
conv.d -= 4503599627370496.0;
return (conv.u[@boolToInt(little_endian)] >> 20) -% 0x3FF;
} else {
return 0;
}
}
test "Find the integer log base 2 of an integer with an 64-bit IEEE float" {
try expectEqual(log2usingFloat(0), 0);
try expectEqual(log2usingFloat(1), 0);
try expectEqual(log2usingFloat(2), 1);
try expectEqual(log2usingFloat(127), 6);
try expectEqual(log2usingFloat(128), 7);
try expectEqual(log2usingFloat(0xffffffff), 31);
}
/// Find the log base 2 of an integer with a lookup table
/// Returns ⌊log2(`val`)⌋, i.e. the position of the highest bit set.
/// https://github.com/cryptocode/bithacks#find-the-log-base-2-of-an-integer-with-a-lookup-table
pub fn log2usingLookupTable(val: u32) u32 {
// Build log table at compile time
const logTable = comptime val: {
var table: [256]u8 = undefined;
table[0] = 0;
table[1] = 0;
var i: usize = 2;
while (i < 256) : (i += 1) {
table[i] = 1 + table[i / 2];
}
break :val table;
};
var tt: u32 = val >> 16;
var t: u32 = undefined;
if (tt != 0) {
t = tt >> 8;
return if (t != 0) 24 + logTable[t] else 16 + logTable[tt];
} else {
t = val >> 8;
return if (t != 0) 8 + logTable[t] else logTable[val];
}
}
test "Find the log base 2 of an integer with a lookup table" {
try expectEqual(log2usingLookupTable(0), 0);
try expectEqual(log2usingLookupTable(1), 0);
try expectEqual(log2usingLookupTable(2), 1);
try expectEqual(log2usingLookupTable(127), 6);
try expectEqual(log2usingLookupTable(128), 7);
try expectEqual(log2usingLookupTable(0xffffffff), 31);
}
/// Find the log base 2 of an N-bit integer in O(lg(N)) operations
/// https://github.com/cryptocode/bithacks#find-the-log-base-2-of-an-n-bit-integer-in-olgn-operations
pub fn log2inLogOps(val: u32) u32 {
const shiftType = std.math.Log2Int(u32);
const b: [5]u32 = .{ 0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000 };
const S: [5]u32 = .{ 1, 2, 4, 8, 16 };
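// Reasoning sketch (an added note): mask b[i] covers bits that only a value
// >= 2^S[i] can have set; whenever the test fires, v shifts down by S[i] and
// the result accumulates S[i] - a binary search over the exponent in 5 steps.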
var v = val;
var i: i4 = 4;
var res: u32 = 0;
while (i >= 0) : (i -= 1) {
const index = @intCast(usize, i);
if ((v & b[index]) != 0) {
v >>= @intCast(shiftType, S[index]);
res |= S[index];
}
}
return res;
}
test "Find the log base 2 of an N-bit integer in O(lg(N)) operations" {
try expectEqual(log2inLogOps(0), 0);
try expectEqual(log2inLogOps(1), 0);
try expectEqual(log2inLogOps(2), 1);
try expectEqual(log2inLogOps(127), 6);
try expectEqual(log2inLogOps(128), 7);
try expectEqual(log2inLogOps(0xffffffff), 31);
}
/// Find the log base 2 of an N-bit integer in O(lg(N)) operations with multiply and lookup
/// https://github.com/cryptocode/bithacks#find-the-log-base-2-of-an-n-bit-integer-in-olgn-operations-with-multiply-and-lookup
pub fn log2inLogOpsLookup(val: u32) u32 {
// zig fmt: off
const multiplyDeBruijnBitPosition: [32]u32 = .{
0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30,
8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31
};
// zig fmt: on
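// Reasoning sketch (an added note): the or-shift cascade below smears the
// highest set bit into every lower position, turning v into 2^(floor(log2)+1) - 1.
// Multiplying by the De Bruijn-style constant 0x07C4ACDD then leaves a
// distinct 5-bit pattern in the top bits, which indexes the position table.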
var v = val;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
return multiplyDeBruijnBitPosition[@as(u32, (v *% @as(u32, 0x07C4ACDD))) >> 27];
}
test "Find the log base 2 of an N-bit integer in O(lg(N)) operations with multiply and lookup" {
try expectEqual(log2inLogOpsLookup(0), 0);
try expectEqual(log2inLogOpsLookup(1), 0);
try expectEqual(log2inLogOpsLookup(2), 1);
try expectEqual(log2inLogOpsLookup(127), 6);
try expectEqual(log2inLogOpsLookup(128), 7);
try expectEqual(log2inLogOpsLookup(0xffffffff), 31);
}
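// A hedged cross-check (an addition to the original): all the log2 variants
// above should agree with std.math.log2_int for nonzero 32-bit inputs.
test "log2 variants agree with std.math.log2_int" {
var v: u32 = 1;
while (v < 100_000) : (v += 97) {
const expected = @as(u32, std.math.log2_int(u32, v));
try expectEqual(log2floorObvious(v), expected);
try expectEqual(log2usingFloat(v), expected);
try expectEqual(log2usingLookupTable(v), expected);
try expectEqual(log2inLogOps(v), expected);
try expectEqual(log2inLogOpsLookup(v), expected);
}
}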
/// Find integer log base 10 of an integer
/// Returns 0 if `val` is 0, otherwise ⌊log10(`val`)⌋ is returned
/// https://github.com/cryptocode/bithacks#find-integer-log-base-10-of-an-integer
pub fn log10usingPowers(val: u32) u32 {
if (val == 0) return 0;
const powersOf10: [10]u32 =
.{ 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };
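// An added note: 1233/4096 ~= 0.30103 ~= log10(2), so t estimates the decimal
// magnitude from the binary one; the table comparison below fixes the
// possible off-by-one.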
const t: u32 = (log2inLogOpsLookup(val) + 1) * 1233 >> 12; // (use a lg2 method from above)
return t - @boolToInt(val < powersOf10[t]);
}
test "Find integer log base 10 of an integer" {
try expectEqual(log10usingPowers(0), 0);
try expectEqual(log10usingPowers(1), 0);
try expectEqual(log10usingPowers(100), 2);
try expectEqual(log10usingPowers(1000), 3);
try expectEqual(log10usingPowers(1001), 3);
try expectEqual(log10usingPowers(0xfffffff), 8);
try expectEqual(log10usingPowers(0xffffffff), 9);
}
/// Find integer log base 10 of an integer the obvious way
/// https://github.com/cryptocode/bithacks#find-integer-log-base-10-of-an-integer-the-obvious-way
pub fn log10obvious(val: u32) u32 {
// zig fmt: off
return if (val >= 1000000000) @as(u32, 9)
else if (val >= 100000000) @as(u32, 8)
else if (val >= 10000000) @as(u32, 7)
else if (val >= 1000000) @as(u32, 6)
else if (val >= 100000) @as(u32, 5)
else if (val >= 10000) @as(u32, 4)
else if (val >= 1000) @as(u32, 3)
else if (val >= 100) @as(u32, 2)
else if (val >= 10) @as(u32, 1)
else 0;
// zig fmt: on
}
test "Find integer log base 10 of an integer the obvious way" {
try expectEqual(log10obvious(0), 0);
try expectEqual(log10obvious(1), 0);
try expectEqual(log10obvious(100), 2);
try expectEqual(log10obvious(1000), 3);
try expectEqual(log10obvious(1001), 3);
try expectEqual(log10obvious(0xfffffff), 8);
try expectEqual(log10obvious(0xffffffff), 9);
}
/// Find integer log base 2 of a 32-bit IEEE float
/// If `supportSubnormals` is true, an IEEE 754-compliant variant is used; otherwise a faster non-compliant variant is used
/// Returns ⌊log2(`val`)⌋
/// https://github.com/cryptocode/bithacks#find-integer-log-base-2-of-a-32-bit-ieee-float
pub fn log2float32(val: f32, comptime supportSubnormals: bool) u32 {
if (val == 0) return 0;
const U = extern union {
f: f32,
u: u32,
};
var conv: U = .{ .f = val };
var x = conv.u;
if (supportSubnormals) {
// Build log table at compile time
const logTable = comptime val: {
var table: [256]u8 = undefined;
table[0] = 0;
table[1] = 0;
var i: usize = 2;
while (i < 256) : (i += 1) {
table[i] = 1 + table[i / 2];
}
break :val table;
};
var c: u32 = x >> 23;
if (c > 0) {
c -%= 127;
} else {
// Subnormal, so recompute using mantissa: c = intlog2(x) - 149;
var t: u32 = x >> 16;
if (t > 0) {
c = logTable[t] -% 133;
} else {
t = x >> 8;
c = if (t > 0) logTable[t] -% 141 else logTable[x] -% 149;
}
}
return c;
} else {
return (x >> 23) -% 127;
}
}
test "Find integer log base 2 of a 32-bit IEEE float" {
try expectEqual(log2float32(0, false), 0);
try expectEqual(log2float32(1, false), 0);
try expectEqual(log2float32(2, false), 1);
try expectEqual(log2float32(127, false), 6);
try expectEqual(log2float32(128, false), 7);
try expectEqual(log2float32(0, true), 0);
try expectEqual(log2float32(1, true), 0);
try expectEqual(log2float32(2, true), 1);
try expectEqual(log2float32(127, true), 6);
try expectEqual(log2float32(128, true), 7);
}
/// Find integer log base 2 of the pow(2, r)-root of a 32-bit IEEE float (for unsigned integer r)
/// Input `val` must have a normalized representation
/// https://github.com/cryptocode/bithacks#find-integer-log-base-2-of-the-pow2-r-root-of-a-32-bit-ieee-float-for-unsigned-integer-r
pub fn log2float32pow(val: f32, r: u32) u32 {
assert(std.math.isNormal(val));
const shiftType = std.math.Log2Int(u32);
const U = extern union {
f: f32,
u: u32,
};
var conv: U = .{ .f = val };
return ((((conv.u -% 0x3f800000) >> @intCast(shiftType, r)) +% 0x3f800000) >> 23) -% 127;
}
test "Find integer log base 2 of the pow(2, r)-root of a 32-bit IEEE float (for unsigned integer r)" {
try expectEqual(log2float32pow(16, 1), 2);
try expectEqual(log2float32pow(1024, 3), 1);
}
/// Count the consecutive zero bits (trailing) on the right linearly
/// https://github.com/cryptocode/bithacks#count-the-consecutive-zero-bits-trailing-on-the-right-linearly
pub fn countConsecutiveZeroBitsLinearily(val: anytype) usize {
const T = requireInt(@TypeOf(val));
var v: T = val;
var c: usize = undefined;
if (v != 0) {
v = (v ^ (v -% 1)) >> 1;
c = 0;
while (v != 0) : (c += 1) {
v >>= 1;
}
} else {
c = @typeInfo(T).Int.bits;
}
return c;
}
test "Count the consecutive zero bits (trailing) on the right linearly" {
try expectEqual(countConsecutiveZeroBitsLinearily(@as(u32, 104)), 3);
try expectEqual(countConsecutiveZeroBitsLinearily(@as(u32, 0xffffffff)), 0);
try expectEqual(countConsecutiveZeroBitsLinearily(@as(i32, 0x7fffffff)), 0);
try expectEqual(countConsecutiveZeroBitsLinearily(@as(u8, 0)), 8);
try expectEqual(countConsecutiveZeroBitsLinearily(@as(u32, 0)), 32);
try expectEqual(countConsecutiveZeroBitsLinearily(@as(u64, 0)), 64);
try expectEqual(countConsecutiveZeroBitsLinearily(@as(u128, 0)), 128);
}
/// Count the consecutive zero bits (trailing) on the right in parallel
/// https://github.com/cryptocode/bithacks#count-the-consecutive-zero-bits-trailing-on-the-right-in-parallel
pub fn countConsecutiveZeroBitsParallel(val: u32) usize {
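// Reasoning sketch (an added note): `val & -%val` isolates the lowest set
// bit; each mask below tests which half/quarter/... of the word that bit
// lies in and subtracts the corresponding weight from the count.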
var v: u32 = val & -%val;
var c: u32 = 32;
if (v != 0) c -%= 1;
if ((v & 0x0000FFFF) != 0) c -%= 16;
if ((v & 0x00FF00FF) != 0) c -%= 8;
if ((v & 0x0F0F0F0F) != 0) c -%= 4;
if ((v & 0x33333333) != 0) c -%= 2;
if ((v & 0x55555555) != 0) c -%= 1;
return c;
}
test "Count the consecutive zero bits (trailing) on the right in parallel" {
try expectEqual(countConsecutiveZeroBitsParallel(1), 0);
try expectEqual(countConsecutiveZeroBitsParallel(104), 3);
try expectEqual(countConsecutiveZeroBitsParallel(0xffffffff), 0);
try expectEqual(countConsecutiveZeroBitsParallel(0), 32);
}
/// Count the consecutive zero bits (trailing) on the right by binary search
/// Input `val` must be non-zero
/// An improvement over the original is that a branch is eliminated when the
/// input is known to be even, by passing false for `canBeOdd`
/// https://github.com/cryptocode/bithacks#count-the-consecutive-zero-bits-trailing-on-the-right-by-binary-search
pub fn countConsecutiveZeroBitsBinarySearch(val: u32, comptime canBeOdd: bool) usize {
var v: u32 = val;
var c: u32 = 32;
// If 0 == v, then c = 31.
if (canBeOdd and v & 0x1 != 0) {
// Special case for odd v (assumed to happen half of the time)
c = 0;
} else {
c = 1;
if ((v & 0xffff) == 0) {
v >>= 16;
c += 16;
}
if ((v & 0xff) == 0) {
v >>= 8;
c += 8;
}
if ((v & 0xf) == 0) {
v >>= 4;
c += 4;
}
if ((v & 0x3) == 0) {
v >>= 2;
c += 2;
}
c -= v & 0x1;
}
return c;
}
test "Count the consecutive zero bits (trailing) on the right by binary search" {
try expectEqual(countConsecutiveZeroBitsBinarySearch(1, true), 0);
try expectEqual(countConsecutiveZeroBitsBinarySearch(104, true), 3);
try expectEqual(countConsecutiveZeroBitsBinarySearch(0xffffffff, true), 0);
}
/// Count the consecutive zero bits (trailing) on the right by casting to a float
/// https://github.com/cryptocode/bithacks#count-the-consecutive-zero-bits-trailing-on-the-right-by-casting-to-a-float
pub fn countConsecutiveZeroBitsUsingFloat(val: u32) usize {
const U = extern union {
f: f32,
u: u32,
};
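// Reasoning sketch (an added note): the isolated lowest bit converts to f32
// exactly, with its position stored in the exponent field (bits 23..30);
// subtracting the bias 0x7f recovers that position.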
var conv: U = .{ .f = @intToFloat(f32, val & -%val) };
return (conv.u >> 23) - 0x7f;
}
test "Count the consecutive zero bits (trailing) on the right by casting to a float" {
try expectEqual(countConsecutiveZeroBitsUsingFloat(1), 0);
try expectEqual(countConsecutiveZeroBitsUsingFloat(104), 3);
try expectEqual(countConsecutiveZeroBitsUsingFloat(0xffffffff), 0);
}
// Count the consecutive zero bits (trailing) on the right with modulus division and lookup
// https://github.com/cryptocode/bithacks#count-the-consecutive-zero-bits-trailing-on-the-right-with-modulus-division-and-lookup
pub fn countConsecutiveZeroBitsDivLookup(val: u32) usize {
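// Reasoning sketch (an added note): the 32 powers of two are pairwise
// distinct modulo 37, so `(-%val & val) % 37` maps the isolated lowest bit
// to a unique slot; residues no power of two produces are filled with 0.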
// zig fmt: off
const mod37BitPosition: [37]u32 = .{
32, 0, 1, 26, 2, 23, 27, 0, 3, 16, 24, 30, 28, 11, 0, 13, 4,
7, 17, 0, 25, 22, 31, 15, 29, 10, 12, 6, 0, 21, 14, 9, 5,
20, 8, 19, 18
};
// zig fmt: on
return mod37BitPosition[(-%val & val) % 37];
}
test "Count the consecutive zero bits (trailing) on the right with modulus division and lookup" {
try expectEqual(countConsecutiveZeroBitsDivLookup(1), 0);
try expectEqual(countConsecutiveZeroBitsDivLookup(104), 3);
try expectEqual(countConsecutiveZeroBitsDivLookup(0xffffffff), 0);
}
/// Count the consecutive zero bits (trailing) on the right with multiply and lookup
/// https://github.com/cryptocode/bithacks#count-the-consecutive-zero-bits-trailing-on-the-right-with-multiply-and-lookup
pub fn countConsecutiveZeroBitsMulLookup(val: u32) usize {
// zig fmt: off
const multiplyDeBruijnBitPosition: [32]u32 = .{
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 };
// zig fmt: on
return multiplyDeBruijnBitPosition[((val & -%val) * @as(u32, 0x077CB531)) >> 27];
}
test "Count the consecutive zero bits (trailing) on the right with multiply and lookup" {
try expectEqual(countConsecutiveZeroBitsMulLookup(1), 0);
try expectEqual(countConsecutiveZeroBitsMulLookup(104), 3);
try expectEqual(countConsecutiveZeroBitsMulLookup(0xffffffff), 0);
}
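// A hedged cross-check (an addition to the original): the trailing-zero
// variants above should agree with the @ctz builtin for nonzero inputs.
test "trailing-zero variants agree with @ctz" {
var v: u32 = 1;
while (v < 5_000) : (v += 61) {
const expected = @as(usize, @ctz(u32, v));
try expectEqual(countConsecutiveZeroBitsLinearily(v), expected);
try expectEqual(countConsecutiveZeroBitsParallel(v), expected);
try expectEqual(countConsecutiveZeroBitsBinarySearch(v, true), expected);
try expectEqual(countConsecutiveZeroBitsUsingFloat(v), expected);
try expectEqual(countConsecutiveZeroBitsDivLookup(v), expected);
try expectEqual(countConsecutiveZeroBitsMulLookup(v), expected);
}
}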
/// Round up to the next highest power of 2 by float casting
/// https://github.com/cryptocode/bithacks#round-up-to-the-next-highest-power-of-2-by-float-casting
pub fn roundToPow2ByFloat(val: u32) u32 {
assert(val < (1 << 31));
const shiftType = std.math.Log2Int(u32);
if (val > 1) {
const U = extern union {
f: f32,
u: u32,
};
var conv: U = .{ .f = @intToFloat(f32, val) };
const t = @as(u32, 1) << @intCast(shiftType, (conv.u >> 23) -% 0x7f);
return t << @boolToInt(t < val);
} else return 1;
}
test "Round up to the next highest power of 2 by float casting" {
try expectEqual(roundToPow2ByFloat(0), 1);
try expectEqual(roundToPow2ByFloat(3), 4);
try expectEqual(roundToPow2ByFloat(7), 8);
try expectEqual(roundToPow2ByFloat(8), 8);
// Test highest supported input; higher inputs will assert
try expectEqual(roundToPow2ByFloat((1 << 31) - 1), 0x80000000);
}
/// Round up to the next highest power of 2
/// https://github.com/cryptocode/bithacks#round-up-to-the-next-highest-power-of-2
pub fn roundToPow2By(val: u32) u32 {
assert(val < (1 << 31));
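// Reasoning sketch (an added note): decrementing first makes exact powers of
// two map to themselves; the or-shift cascade then copies the highest set
// bit into all lower positions (giving 2^k - 1), and the increment lands on
// the next power of two.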
var v = val -% 1;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v +%= 1;
// For consistency with `roundToPow2ByFloat`, 0 => 1
v +%= @boolToInt(v == 0);
return v;
}
test "Round up to the next highest power of 2" {
try expectEqual(roundToPow2By(0), 1);
try expectEqual(roundToPow2By(3), 4);
try expectEqual(roundToPow2By(7), 8);
try expectEqual(roundToPow2By(8), 8);
try expectEqual(roundToPow2By((1 << 31) - 1), 0x80000000);
}
fn DoubledIntSize(comptime T: type) type {
return std.meta.Int(@typeInfo(T).Int.signedness, @typeInfo(T).Int.bits * 2);
}
/// Interleave bits the obvious way
/// Bits of `first` end up in even positions, bits of `second` in odd positions, and the interleaved result is returned
/// https://github.com/cryptocode/bithacks#interleave-bits-the-obvious-way
pub fn interleaveBitsObvious(first: anytype, second: @TypeOf(first)) DoubledIntSize(@TypeOf(first)) {
const T = @TypeOf(first);
const T2 = DoubledIntSize(T);
const bits = @typeInfo(T).Int.bits;
const shiftType = std.math.Log2Int(T2);
var res: T2 = 0;
var i: isize = 0;
while (i < bits) : (i += 1) {
var i_shift = @intCast(shiftType, i);
res |= ((first & (@as(T2, 1) << i_shift)) << i_shift) | ((second & (@as(T2, 1) << i_shift)) << @intCast(shiftType, i + 1));
}
return res;
}
test "Interleave bits the obvious way" {
try expectEqual(interleaveBitsObvious(@as(u16, 0), 0), 0);
try expectEqual(interleaveBitsObvious(@as(u16, 1), 2), 9);
try expectEqual(interleaveBitsObvious(@as(u16, 0xfefe), 0xfefe), 0xfffcfffc);
try expectEqual(interleaveBitsObvious(@as(u16, std.math.maxInt(u16)), std.math.maxInt(u16)), std.math.maxInt(u32));
try expectEqual(interleaveBitsObvious(@as(u32, std.math.maxInt(u32)), std.math.maxInt(u32)), std.math.maxInt(u64));
try expectEqual(interleaveBitsObvious(@as(u64, std.math.maxInt(u64)), std.math.maxInt(u64)), std.math.maxInt(u128));
}
/// Interleave bits by table lookup
/// https://github.com/cryptocode/bithacks#interleave-bits-by-table-lookup
pub fn interleaveBitsLookup(x: u16, y: u16) u32 {
// zig fmt: off
const mortonTable256: [256]u32 = .{
0x0000, 0x0001, 0x0004, 0x0005, 0x0010, 0x0011, 0x0014, 0x0015,
0x0040, 0x0041, 0x0044, 0x0045, 0x0050, 0x0051, 0x0054, 0x0055,
0x0140, 0x0141, 0x0144, 0x0145, 0x0150, 0x0151, 0x0154, 0x0155,
0x0100, 0x0101, 0x0104, 0x0105, 0x0110, 0x0111, 0x0114, 0x0115,
0x0400, 0x0401, 0x0404, 0x0405, 0x0410, 0x0411, 0x0414, 0x0415,
0x0440, 0x0441, 0x0444, 0x0445, 0x0450, 0x0451, 0x0454, 0x0455,
0x0500, 0x0501, 0x0504, 0x0505, 0x0510, 0x0511, 0x0514, 0x0515,
0x0540, 0x0541, 0x0544, 0x0545, 0x0550, 0x0551, 0x0554, 0x0555,
0x1000, 0x1001, 0x1004, 0x1005, 0x1010, 0x1011, 0x1014, 0x1015,
0x1040, 0x1041, 0x1044, 0x1045, 0x1050, 0x1051, 0x1054, 0x1055,
0x1100, 0x1101, 0x1104, 0x1105, 0x1110, 0x1111, 0x1114, 0x1115,
0x1140, 0x1141, 0x1144, 0x1145, 0x1150, 0x1151, 0x1154, 0x1155,
0x1400, 0x1401, 0x1404, 0x1405, 0x1410, 0x1411, 0x1414, 0x1415,
0x1440, 0x1441, 0x1444, 0x1445, 0x1450, 0x1451, 0x1454, 0x1455,
0x1500, 0x1501, 0x1504, 0x1505, 0x1510, 0x1511, 0x1514, 0x1515,
0x1540, 0x1541, 0x1544, 0x1545, 0x1550, 0x1551, 0x1554, 0x1555,
0x4000, 0x4001, 0x4004, 0x4005, 0x4010, 0x4011, 0x4014, 0x4015,
0x4040, 0x4041, 0x4044, 0x4045, 0x4050, 0x4051, 0x4054, 0x4055,
0x4100, 0x4101, 0x4104, 0x4105, 0x4110, 0x4111, 0x4114, 0x4115,
0x4140, 0x4141, 0x4144, 0x4145, 0x4150, 0x4151, 0x4154, 0x4155,
0x4400, 0x4401, 0x4404, 0x4405, 0x4410, 0x4411, 0x4414, 0x4415,
0x4440, 0x4441, 0x4444, 0x4445, 0x4450, 0x4451, 0x4454, 0x4455,
0x4500, 0x4501, 0x4504, 0x4505, 0x4510, 0x4511, 0x4514, 0x4515,
0x4540, 0x4541, 0x4544, 0x4545, 0x4550, 0x4551, 0x4554, 0x4555,
0x5000, 0x5001, 0x5004, 0x5005, 0x5010, 0x5011, 0x5014, 0x5015,
0x5040, 0x5041, 0x5044, 0x5045, 0x5050, 0x5051, 0x5054, 0x5055,
0x5100, 0x5101, 0x5104, 0x5105, 0x5110, 0x5111, 0x5114, 0x5115,
0x5140, 0x5141, 0x5144, 0x5145, 0x5150, 0x5151, 0x5154, 0x5155,
0x5400, 0x5401, 0x5404, 0x5405, 0x5410, 0x5411, 0x5414, 0x5415,
0x5440, 0x5441, 0x5444, 0x5445, 0x5450, 0x5451, 0x5454, 0x5455,
0x5500, 0x5501, 0x5504, 0x5505, 0x5510, 0x5511, 0x5514, 0x5515,
0x5540, 0x5541, 0x5544, 0x5545, 0x5550, 0x5551, 0x5554, 0x5555
};
// zig fmt: on
return mortonTable256[y >> 8] << 17 |
mortonTable256[x >> 8] << 16 |
mortonTable256[y & 0xFF] << 1 |
mortonTable256[x & 0xFF];
}
test "Interleave bits by table lookup" {
try expectEqual(interleaveBitsLookup(0, 0), 0);
try expectEqual(interleaveBitsLookup(1, 2), 9);
try expectEqual(interleaveBitsLookup(0xfefe, 0xfefe), 0xfffcfffc);
try expectEqual(interleaveBitsLookup(std.math.maxInt(u16), std.math.maxInt(u16)), std.math.maxInt(u32));
}
/// Interleave bits with 64-bit multiply
/// https://github.com/cryptocode/bithacks#interleave-bits-with-64-bit-multiply
pub fn interleaveBitsMul(first: u8, second: u8) u16 {
var x: u16 = first;
var y: u16 = second;
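// Reasoning sketch (an added note): multiplying by 0x0101010101010101 makes
// eight copies of the byte, the diagonal mask 0x8040201008040201 keeps a
// different bit from each copy, and the second multiply gathers those bits
// so the final shifts and masks place them in even (x) and odd (y) positions.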
return @truncate(u16, ((((x *%
@as(u64, 0x0101010101010101)) & 0x8040201008040201) *%
@as(u64, 0x0102040810204081)) >> 49) & 0x5555 | (((((y *%
@as(u64, 0x0101010101010101)) & 0x8040201008040201) *%
@as(u64, 0x0102040810204081)) >> 48) & 0xaaaa));
}
test "Interleave bits with 64-bit multiply" {
try expectEqual(interleaveBitsMul(0, 0), 0);
try expectEqual(interleaveBitsMul(1, 2), 9);
try expectEqual(interleaveBitsMul(0xfe, 0xfe), 0xfffc);
try expectEqual(interleaveBitsMul(std.math.maxInt(u8), std.math.maxInt(u8)), std.math.maxInt(u16));
}
/// Interleave bits by Binary Magic Numbers
/// https://github.com/cryptocode/bithacks#interleave-bits-by-binary-magic-numbers
pub fn interleaveBitsMagic(first: u16, second: u16) u32 {
const B: [4]u32 = .{ 0x55555555, 0x33333333, 0x0f0f0f0f, 0x00ff00ff };
const S: [4]u32 = .{ 1, 2, 4, 8 };
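// Reasoning sketch (an added note): each or-shift-mask step below doubles
// the gap between a word's bits, so after four steps the 16 input bits sit
// in every other position; `x | (y << 1)` then meshes the two spread words.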
var x: u32 = first;
var y: u32 = second;
x = (x | (x << S[3])) & B[3];
x = (x | (x << S[2])) & B[2];
x = (x | (x << S[1])) & B[1];
x = (x | (x << S[0])) & B[0];
y = (y | (y << S[3])) & B[3];
y = (y | (y << S[2])) & B[2];
y = (y | (y << S[1])) & B[1];
y = (y | (y << S[0])) & B[0];
return x | (y << 1);
}
test "Interleave bits by Binary Magic Numbers" {
try expectEqual(interleaveBitsMagic(0, 0), 0);
try expectEqual(interleaveBitsMagic(1, 2), 9);
try expectEqual(interleaveBitsMagic(0xfefe, 0xfefe), 0xfffcfffc);
try expectEqual(interleaveBitsMagic(std.math.maxInt(u16), std.math.maxInt(u16)), std.math.maxInt(u32));
}
/// Determine if a word has a zero byte
/// https://github.com/cryptocode/bithacks#determine-if-a-word-has-a-zero-byte
pub fn wordContainsZeroByte(val: u32) bool {
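// Reasoning sketch (an added note): subtracting 0x01 from every byte borrows
// into bit 7 exactly when a byte underflows from zero; `& ~val` discards
// bytes whose high bit was already set, leaving flags only for zero bytes.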
return (((val -% 0x01010101) & ~val) & 0x80808080) != 0;
}
test "Determine if a word has a zero byte" {
try expect(wordContainsZeroByte(0xff00ffff));
try expect(wordContainsZeroByte(0));
try expect(!wordContainsZeroByte(0xffffffff));
}
/// Determine if a word has a byte equal to `needle`
/// https://github.com/cryptocode/bithacks#determine-if-a-word-has-a-byte-equal-to-n
pub fn wordContainsByte(val: u32, needle: u8) bool {
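// An added note: `~0 / 255` is 0x01010101, so the multiply replicates
// `needle` into every byte; XOR zeroes the matching bytes, which
// `wordContainsZeroByte` then detects.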
return wordContainsZeroByte(val ^ (~@as(u32, 0) / 255 * needle));
}
test "Determine if a word has a byte equal to n" {
try expect(wordContainsByte(0xff000000, 0xff));
try expect(wordContainsByte(0x00ff0000, 0xff));
try expect(wordContainsByte(0x0000ff00, 0xff));
try expect(wordContainsByte(0x000000ff, 0xff));
try expect(wordContainsByte(0xff001c00, 0x1c));
try expect(!wordContainsByte(0xff001c00, 0xec));
}
/// Determine if a word has a byte less than `n` (which must be <= 128)
/// https://github.com/cryptocode/bithacks#determine-if-a-word-has-a-byte-less-than-n
pub fn wordContainsByteLessThan(val: anytype, n: u8) bool {
assert(n <= @as(u8, 128));
const T = requireUnsignedInt(@TypeOf(val));
return (((val -% ((~@as(T, 0) / 255) *% n)) & ~val) & ((~@as(T, 0) / 255) *% 128)) != 0;
}
/// Counts the number of bytes in `val` that are less than `n` (which must be <= 128)
/// https://github.com/cryptocode/bithacks#determine-if-a-word-has-a-byte-less-than-n
pub fn countBytesLessThan(val: anytype, n: u8) usize {
assert(n <= @as(u8, 128));
const T = requireUnsignedInt(@TypeOf(val));
const maxBy255 = ~@as(T, 0) / 255;
var res = (((((maxBy255 *% @as(T, 127 +% n)) -%
(val & (maxBy255 *% 127))) & ~val) &
(maxBy255 *% 128)) / 128) % 255;
return @intCast(usize, res);
}
test "Determine if a word has a byte less than n" {
// Containment tests
try expect(wordContainsByteLessThan(@as(u32, 0), 1));
try expect(wordContainsByteLessThan(@as(u32, 0xff79ffff), 0x80));
try expect(!wordContainsByteLessThan(@as(u32, 0xffffffff), 0x80));
try expect(wordContainsByteLessThan(@as(u64, 0xff79ffffffffffff), 0x80));
// Counting tests
try expectEqual(countBytesLessThan(@as(u32, 0), 1), 4);
try expectEqual(countBytesLessThan(@as(u64, 0), 1), 8);
try expectEqual(countBytesLessThan(@as(u128, 0), 1), 16);
try expectEqual(countBytesLessThan(@as(u32, 0xff79ffff), 0x80), 1);
try expectEqual(countBytesLessThan(@as(u32, 0xffffffff), 0x80), 0);
}
/// Determine if a word has a byte greater than n
/// https://github.com/cryptocode/bithacks#determine-if-a-word-has-a-byte-greater-than-n
pub fn wordContainsByteGreaterThan(val: anytype, n: u8) bool {
assert(n <= @as(u8, 127));
const T = requireUnsignedInt(@TypeOf(val));
return (((val +% ((~@as(T, 0) / 255) *% (127 -% n))) | val) & ((~@as(T, 0) / 255) *% 128)) != 0;
}
/// Counts the number of bytes in `val` that are greater than `n`, where `n` <= 127
pub fn countBytesGreaterThan(val: anytype, n: u8) usize {
assert(n <= @as(u8, 127));
const T = requireUnsignedInt(@TypeOf(val));
const maxBy255 = ~@as(T, 0) / 255;
var res = (((((val & (maxBy255 *% 127)) +%
(maxBy255 *% (127 -% n))) | val) &
(maxBy255 *% 128)) / 128) % 255;
return @intCast(usize, res);
}
test "Determine if a word has a byte greater than n" {
// Containment tests
try expect(!wordContainsByteGreaterThan(@as(u32, 0), 1));
try expect(wordContainsByteGreaterThan(@as(u32, 0x00810000), 0x7F));
try expect(wordContainsByteGreaterThan(@as(u64, 0x0081000000000000), 0x7F));
// Counting tests
try expectEqual(countBytesGreaterThan(@as(u32, std.math.maxInt(u32)), 1), 4);
try expectEqual(countBytesGreaterThan(@as(u64, std.math.maxInt(u64)), 1), 8);
try expectEqual(countBytesGreaterThan(@as(u128, std.math.maxInt(u128)), 1), 16);
try expectEqual(countBytesGreaterThan(@as(u32, 0x00800000), 0x7F), 1);
try expectEqual(countBytesGreaterThan(@as(u32, 0x0), 0x7F), 0);
}
/// Helper to implement both predicate and counting range test
fn wordHasByteBetweenNumericResult(val: anytype, m: u8, n: u8) @TypeOf(val) {
assert(m <= @as(u8, 127));
assert(n <= @as(u8, 128));
const T = requireUnsignedInt(@TypeOf(val));
const maxBy255 = ~@as(T, 0) / 255;
return (((((maxBy255 *% (127 +% n)) -%
(val & (maxBy255 *% 127))) & ~val) &
((val & (maxBy255 *% 127)) +% (maxBy255 *% (127 -% m)))) &
(maxBy255 *% 128));
}
/// Determine if a word has a byte between m and n, where `m` <= 127 and `n` <= 128
/// https://github.com/cryptocode/bithacks#determine-if-a-word-has-a-byte-between-m-and-n
pub fn wordHasByteBetween(val: anytype, m: u8, n: u8) bool {
return wordHasByteBetweenNumericResult(val, m, n) != 0;
}
/// Count the number of bytes in `val` that are between m and n (exclusive),
/// where `m` <= 127 and `n` <= 128
/// https://github.com/cryptocode/bithacks#determine-if-a-word-has-a-byte-between-m-and-n
pub fn countBytesBetween(val: anytype, m: u8, n: u8) @TypeOf(val) {
return wordHasByteBetweenNumericResult(val, m, n) / 128 % 255;
}
test "Determine if a word has a byte between m and n" {
try expect(!wordHasByteBetween(@as(u32, 0), 1, 128));
try expect(!wordHasByteBetween(@as(u32, 0x00070000), 0x01, 0x06));
try expect(wordHasByteBetween(@as(u32, 0x00050000), 0x01, 0x06));
try expect(wordHasByteBetween(@as(u64, 0x0005000000000000), 0x01, 0x06));
// Make sure upper bound is exclusive
try expectEqual(countBytesBetween(@as(u64, 0x001a00001b001c1d), 0x01, 0x1d), 3);
try expectEqual(countBytesBetween(@as(u64, 0x00), 0, 128), 0);
try expectEqual(countBytesBetween(@as(u64, 0x01), 0, 128), 1);
try expectEqual(countBytesBetween(@as(u8, 0x01), 0, 128), 1);
try expectEqual(countBytesBetween(@as(u16, 0x0101), 0, 128), 2);
}
/// Compute the lexicographically next bit permutation
/// Given an initial `val` with N bits set, this function returns the next number
/// that also has N bits set.
/// The result is undefined if `val` is already the largest possible permutation.
/// https://github.com/cryptocode/bithacks#compute-the-lexicographically-next-bit-permutation
pub fn nextLexicographicPermutation(val: u32) u32 {
// Input's least significant 0 bits set to 1
var t = val | (val - 1);
// Set to 1 the most significant bit to change, set to 0 the least significant ones, and add the necessary 1 bits.
return (t + 1) | (((~t & -%~t) - 1) >> @intCast(u5, @ctz(u32, val) + 1));
}
test "Compute the lexicographically next bit permutation" {
try expectEqual(nextLexicographicPermutation(@as(u32, 0b00000001)), 0b00000010);
try expectEqual(nextLexicographicPermutation(@as(u32, 0b00010011)), 0b00010101);
try expectEqual(nextLexicographicPermutation(@as(u32, 0b00010101)), 0b00010110);
try expectEqual(nextLexicographicPermutation(@as(u32, 0b00010110)), 0b00011001);
try expectEqual(nextLexicographicPermutation(@as(u32, 0b00011001)), 0b00011010);
try expectEqual(nextLexicographicPermutation(@as(u32, 0b00011010)), 0b00011100);
try expectEqual(nextLexicographicPermutation(@as(u32, 0b00011100)), 0b00100011);
}
/// Clear the lowest set bit. The optimizer seems to correctly lower this to blsr and equivalent instructions.
/// (This is an addition to the original bithacks document)
pub fn clearLowestSetBit(val: anytype) @TypeOf(val) {
return val & (val - 1);
}
test "Clear least significant set bit " {
try expectEqual(clearLowestSetBit(@as(u32, 0b00000001)), 0b00000000);
try expectEqual(clearLowestSetBit(@as(u32, 0b00011010)), 0b00011000);
try expectEqual(clearLowestSetBit(@as(u64, 0b00000001)), 0b00000000);
try expectEqual(clearLowestSetBit(@as(u64, 0b000110110001101100011011000110110001101100011000)), 0b000110110001101100011011000110110001101100010000);
} | bithacks.zig |
const std = @import("std");
const display = @import("zbox");
const url = @import("./utils/url.zig");
allocator: std.mem.Allocator,
nick: []const u8,
last_message: ?*Message = null,
last_link_message: ?*Message = null,
bottom_message: ?*Message = null,
disconnected: bool = false,
const Self = @This();
pub const Message = struct {
prev: ?*Message = null,
next: ?*Message = null,
// Points to the closest previous/next message that contains a link,
// forming a second, sparser list threaded through the chat so link
// navigation can skip messages without links.
prev_links: ?*Message = null,
next_links: ?*Message = null,
login_name: []const u8,
time: [5]u8,
// TODO: line doesn't really have an associated login name;
// check how much of a problem that is.
kind: union(enum) {
chat: Comment,
line,
raid: Raid,
resub: Resub,
sub_mistery_gift: SubMisteryGift,
sub_gift: SubGift,
sub: Sub,
},
pub const Comment = struct {
text: []const u8,
/// Author's name (w/ unicode support, empty if not present)
display_name: []const u8,
/// Total months the user was subbed (0 = non sub)
sub_months: usize,
/// Does the user have a founder badge?
is_founder: bool,
/// List of emotes and their position. Must be sorted (asc) by end position
emotes: []Emote = &[0]Emote{},
/// Moderator status
is_mod: bool = false,
/// Message highlighted by redeeming channel points
is_highlighted: bool = false,
};
pub const Raid = struct {
display_name: []const u8,
profile_picture_url: []const u8,
/// How many raiders
count: usize,
};
/// When somebody gifts X subs to random people
pub const SubMisteryGift = struct {
display_name: []const u8,
count: usize,
tier: SubTier,
};
pub const SubGift = struct {
sender_display_name: []const u8,
months: usize,
tier: SubTier,
recipient_login_name: []const u8,
recipient_display_name: []const u8,
};
pub const Sub = struct {
display_name: []const u8,
tier: SubTier,
};
pub const Resub = struct {
display_name: []const u8,
count: usize,
tier: SubTier,
resub_message: []const u8,
resub_message_emotes: []Emote,
};
// ------
pub const SubTier = enum { prime, t1, t2, t3 };
pub const Emote = struct {
twitch_id: []const u8,
start: usize,
end: usize,
img_data: ?[]const u8 = null, // TODO: should this be in
idx: u32 = 0, // surely this will never cause problematic bugs
// Used to sort the emote list by ending position.
pub fn lessThan(_: void, lhs: Emote, rhs: Emote) bool {
return lhs.end < rhs.end;
}
};
};
pub fn setConnectionStatus(self: *Self, status: enum { disconnected, reconnected }) !void {
switch (status) {
.disconnected => self.disconnected = true,
.reconnected => {
if (self.disconnected) {
self.disconnected = false;
const last = self.last_message orelse return;
if (last.kind != .line) {
// TODO print a line or something also it needs a time.
// var msg = try self.allocator.create(Message);
// msg.* = Message{ .kind = .line, .login_name = &[0]u8{}, tim };
// _ = self.addMessage(msg);
}
}
},
}
}
// Returns whether the scroll had any effect.
pub fn scroll(self: *Self, direction: enum { up, down }, n: usize) bool {
std.log.debug("scroll", .{});
var i = n;
var msg = self.bottom_message;
while (i > 0) : (i -= 1) {
if (msg) |m| {
msg = switch (direction) {
.up => m.prev,
.down => m.next,
};
if (msg != null) {
self.bottom_message = msg;
} else {
break;
}
} else {
break;
}
}
return i != n;
}
// Automatically scrolls down unless the user scrolled up.
// Returns whether there was any change in the view.
pub fn addMessage(self: *Self, msg: *Message) bool {
std.log.debug("message", .{});
// Find if the message has URLs and attach it
// to the URL linked list, unless it's our own
// message.
if (!std.mem.eql(u8, msg.login_name, self.nick)) {
switch (msg.kind) {
.chat => |c| {
var it = std.mem.tokenize(u8, c.text, " ");
while (it.next()) |word| {
if (url.sense(word)) {
if (self.last_link_message) |old| {
msg.prev_links = old;
old.next_links = msg;
}
self.last_link_message = msg;
break;
}
}
},
else => {
// TODO: when the resub msg hack gets removed
// we'll also need to analyze that type of message.
},
}
}
var need_render = false;
if (self.last_message == self.bottom_message) {
// Scroll!
self.bottom_message = msg;
need_render = true;
}
if (self.last_message) |last| {
last.next = msg;
msg.prev = self.last_message;
}
self.last_message = msg;
return need_render;
}
/// TODO: we leakin, we scanning
pub fn clearChat(self: *Self, all_or_name: ?[]const u8) void {
if (all_or_name) |login_name| {
std.log.debug("clear chat: {s}", .{login_name});
var current = self.last_message;
while (current) |c| : (current = c.prev) {
if (std.mem.eql(u8, login_name, c.login_name)) {
// Update main linked list
{
if (c.prev) |p| p.next = c.next;
if (c.next) |n| n.prev = c.prev;
// If it's the last message, update the reference
if (c == self.last_message) self.last_message = c.prev;
}
// Update URLs linked list
{
if (c.prev_links) |p| p.next_links = c.next_links;
if (c.next_links) |n| n.prev_links = c.prev_links;
// If it's the last message, update the reference
if (c == self.last_link_message) self.last_link_message = c.prev_links;
}
// If it's the bottom message, scroll the view
if (self.bottom_message) |b| {
if (c == b) {
if (c.next) |n| {
self.bottom_message = n;
} else {
self.bottom_message = c.prev;
}
}
}
}
}
} else {
std.log.debug("clear chat all", .{});
self.last_message = null;
self.bottom_message = null;
self.last_link_message = null;
}
} | src/Chat.zig |
const std = @import("std");
const builtin = @import("builtin");
const Pkg = std.build.Pkg;
const string = []const u8;
pub const cache = ".zigmod/deps";
pub fn addAllTo(exe: *std.build.LibExeObjStep) void {
checkMinZig(builtin.zig_version, exe);
@setEvalBranchQuota(1_000_000);
for (packages) |pkg| {
exe.addPackage(pkg.pkg.?);
}
var llc = false;
var vcpkg = false;
inline for (comptime std.meta.declarations(package_data)) |decl| {
const pkg = @as(Package, @field(package_data, decl.name));
inline for (pkg.system_libs) |item| {
exe.linkSystemLibrary(item);
llc = true;
}
inline for (pkg.c_include_dirs) |item| {
exe.addIncludeDir(@field(dirs, decl.name) ++ "/" ++ item);
llc = true;
}
inline for (pkg.c_source_files) |item| {
exe.addCSourceFile(@field(dirs, decl.name) ++ "/" ++ item, pkg.c_source_flags);
llc = true;
}
vcpkg = vcpkg or pkg.vcpkg;
}
if (llc) exe.linkLibC();
if (builtin.os.tag == .windows and vcpkg) exe.addVcpkgPaths(.static) catch |err| @panic(@errorName(err));
}
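// Usage sketch (an addition, not part of the generated file): from a typical
// zigmod build.zig sitting next to this file, one would do roughly:
//
// const deps = @import("deps.zig");
// pub fn build(b: *std.build.Builder) void {
// const exe = b.addExecutable("app", "src/main.zig");
// deps.addAllTo(exe);
// exe.install();
// }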
pub const Package = struct {
directory: string,
pkg: ?Pkg = null,
c_include_dirs: []const string = &.{},
c_source_files: []const string = &.{},
c_source_flags: []const string = &.{},
system_libs: []const string = &.{},
vcpkg: bool = false,
};
fn checkMinZig(current: std.SemanticVersion, exe: *std.build.LibExeObjStep) void {
const min = std.SemanticVersion.parse("null") catch return;
if (current.order(min).compare(.lt)) @panic(exe.builder.fmt("Your Zig version v{} does not meet the minimum build requirement of v{}", .{current, min}));
}
pub const dirs = struct {
pub const _root = "";
pub const _3od6xx3o5jxd = cache ++ "/../..";
pub const _xzxo5rnug8wj = cache ++ "/v/git/github.com/MasterQ32/zig-args/commit-72a79c87fdf5aaa98f81796fbf6500b5c06b1ebc";
};
pub const package_data = struct {
pub const _3od6xx3o5jxd = Package{
.directory = dirs._3od6xx3o5jxd,
};
pub const _xzxo5rnug8wj = Package{
.directory = dirs._xzxo5rnug8wj,
.pkg = Pkg{ .name = "args", .path = .{ .path = dirs._xzxo5rnug8wj ++ "/args.zig" }, .dependencies = null },
};
pub const _root = Package{
.directory = dirs._root,
};
};
pub const packages = &[_]Package{
package_data._xzxo5rnug8wj,
};
pub const pkgs = struct {
pub const args = package_data._xzxo5rnug8wj;
};
pub const imports = struct {
pub const args = @import(".zigmod/deps/v/git/github.com/MasterQ32/zig-args/commit-72a79c87fdf5aaa98f81796fbf6500b5c06b1ebc/args.zig");
}; | deps.zig |
const std = @import("std");
const math = std.math;
const assert = std.debug.assert;
const L = std.unicode.utf8ToUtf16LeStringLiteral;
const zwin32 = @import("zwin32");
const w32 = zwin32.base;
const d3d12 = zwin32.d3d12;
const hrPanic = zwin32.hrPanic;
const hrPanicOnFail = zwin32.hrPanicOnFail;
const zd3d12 = @import("zd3d12");
const common = @import("common");
const c = common.c;
const vm = common.vectormath;
const GuiRenderer = common.GuiRenderer;
const zpix = @import("zpix");
const Vec2 = vm.Vec2;
const Vec3 = vm.Vec3;
const Vec4 = vm.Vec4;
const Mat4 = vm.Mat4;
pub export const D3D12SDKVersion: u32 = 4;
pub export const D3D12SDKPath: [*:0]const u8 = ".\\d3d12\\";
const content_dir = @import("build_options").content_dir;
const window_name = "zig-gamedev: simple raytracer";
const window_width = 1920;
const window_height = 1080;
const Vertex = struct {
position: Vec3,
normal: Vec3,
texcoords0: Vec2,
tangent: Vec4,
};
const Mesh = struct {
index_offset: u32,
vertex_offset: u32,
num_indices: u32,
num_vertices: u32,
material_index: u32,
};
const Material = struct {
base_color: Vec3,
roughness: f32,
metallic: f32,
base_color_tex_index: u16,
metallic_roughness_tex_index: u16,
normal_tex_index: u16,
};
const ResourceView = struct {
resource: zd3d12.ResourceHandle,
view: d3d12.CPU_DESCRIPTOR_HANDLE,
};
const PsoStaticMesh_FrameConst = struct {
object_to_clip: Mat4,
object_to_world: Mat4,
camera_position: Vec3,
padding0: f32 = 0.0,
light_position: Vec3,
draw_mode: i32, // 0 - no shadows, 1 - shadows, 2 - shadow mask
};
comptime {
assert(@sizeOf(PsoStaticMesh_FrameConst) == 128 + 32);
}
const PsoZPrePass_FrameConst = struct {
object_to_clip: Mat4,
};
const PsoGenShadowRays_FrameConst = struct {
object_to_clip: Mat4,
object_to_world: Mat4,
};
const PsoTraceShadowRays_FrameConst = struct {
light_position: Vec3,
padding0: f32 = 0.0,
};
const DemoState = struct {
gctx: zd3d12.GraphicsContext,
guir: GuiRenderer,
frame_stats: common.FrameStats,
static_mesh_pso: zd3d12.PipelineHandle,
z_pre_pass_pso: zd3d12.PipelineHandle,
gen_shadow_rays_pso: zd3d12.PipelineHandle,
trace_shadow_rays_stateobj: ?*d3d12.IStateObject,
trace_shadow_rays_rs: ?*d3d12.IRootSignature,
trace_shadow_rays_table: zd3d12.ResourceHandle,
depth_texture: zd3d12.ResourceHandle,
depth_texture_dsv: d3d12.CPU_DESCRIPTOR_HANDLE,
depth_texture_srv: d3d12.CPU_DESCRIPTOR_HANDLE,
shadow_rays_texture: zd3d12.ResourceHandle,
shadow_rays_texture_rtv: d3d12.CPU_DESCRIPTOR_HANDLE,
shadow_rays_texture_srv: d3d12.CPU_DESCRIPTOR_HANDLE,
shadow_mask_texture: zd3d12.ResourceHandle,
shadow_mask_texture_uav: d3d12.CPU_DESCRIPTOR_HANDLE,
shadow_mask_texture_srv: d3d12.CPU_DESCRIPTOR_HANDLE,
vertex_buffer: ResourceView,
index_buffer: ResourceView,
blas_buffer: zd3d12.ResourceHandle,
tlas_buffer: zd3d12.ResourceHandle,
meshes: std.ArrayList(Mesh),
materials: std.ArrayList(Material),
textures: std.ArrayList(ResourceView),
camera: struct {
position: Vec3,
forward: Vec3,
pitch: f32,
yaw: f32,
},
mouse: struct {
cursor_prev_x: i32,
cursor_prev_y: i32,
},
light_position: Vec3,
dxr_is_supported: bool,
dxr_draw_mode: i32, // 0 - no shadows, 1 - shadows, 2 - shadow mask
};
fn parseAndLoadGltfFile(gltf_path: []const u8) *c.cgltf_data {
var data: *c.cgltf_data = undefined;
const options = std.mem.zeroes(c.cgltf_options);
// Parse.
{
const result = c.cgltf_parse_file(&options, gltf_path.ptr, @ptrCast([*c][*c]c.cgltf_data, &data));
assert(result == c.cgltf_result_success);
}
// Load.
{
const result = c.cgltf_load_buffers(&options, data, gltf_path.ptr);
assert(result == c.cgltf_result_success);
}
return data;
}
fn appendMeshPrimitive(
data: *c.cgltf_data,
mesh_index: u32,
prim_index: u32,
indices: *std.ArrayList(u32),
positions: *std.ArrayList(Vec3),
normals: ?*std.ArrayList(Vec3),
texcoords0: ?*std.ArrayList(Vec2),
tangents: ?*std.ArrayList(Vec4),
) void {
assert(mesh_index < data.meshes_count);
assert(prim_index < data.meshes[mesh_index].primitives_count);
const num_vertices: u32 = @intCast(u32, data.meshes[mesh_index].primitives[prim_index].attributes[0].data.*.count);
const num_indices: u32 = @intCast(u32, data.meshes[mesh_index].primitives[prim_index].indices.*.count);
// Indices.
{
indices.ensureTotalCapacity(indices.items.len + num_indices) catch unreachable;
const accessor = data.meshes[mesh_index].primitives[prim_index].indices;
assert(accessor.*.buffer_view != null);
assert(accessor.*.stride == accessor.*.buffer_view.*.stride or accessor.*.buffer_view.*.stride == 0);
assert((accessor.*.stride * accessor.*.count) == accessor.*.buffer_view.*.size);
assert(accessor.*.buffer_view.*.buffer.*.data != null);
const data_addr = @alignCast(4, @ptrCast([*]const u8, accessor.*.buffer_view.*.buffer.*.data) +
accessor.*.offset + accessor.*.buffer_view.*.offset);
if (accessor.*.stride == 1) {
assert(accessor.*.component_type == c.cgltf_component_type_r_8u);
const src = @ptrCast([*]const u8, data_addr);
var i: u32 = 0;
while (i < num_indices) : (i += 1) {
indices.appendAssumeCapacity(src[i]);
}
} else if (accessor.*.stride == 2) {
assert(accessor.*.component_type == c.cgltf_component_type_r_16u);
const src = @ptrCast([*]const u16, data_addr);
var i: u32 = 0;
while (i < num_indices) : (i += 1) {
indices.appendAssumeCapacity(src[i]);
}
} else if (accessor.*.stride == 4) {
assert(accessor.*.component_type == c.cgltf_component_type_r_32u);
const src = @ptrCast([*]const u32, data_addr);
var i: u32 = 0;
while (i < num_indices) : (i += 1) {
indices.appendAssumeCapacity(src[i]);
}
} else {
unreachable;
}
}
// Attributes.
{
positions.resize(positions.items.len + num_vertices) catch unreachable;
if (normals != null) normals.?.resize(normals.?.items.len + num_vertices) catch unreachable;
if (texcoords0 != null) texcoords0.?.resize(texcoords0.?.items.len + num_vertices) catch unreachable;
if (tangents != null) tangents.?.resize(tangents.?.items.len + num_vertices) catch unreachable;
const num_attribs: u32 = @intCast(u32, data.meshes[mesh_index].primitives[prim_index].attributes_count);
var attrib_index: u32 = 0;
while (attrib_index < num_attribs) : (attrib_index += 1) {
const attrib = &data.meshes[mesh_index].primitives[prim_index].attributes[attrib_index];
const accessor = attrib.data;
assert(accessor.*.buffer_view != null);
assert(accessor.*.stride == accessor.*.buffer_view.*.stride or accessor.*.buffer_view.*.stride == 0);
assert((accessor.*.stride * accessor.*.count) == accessor.*.buffer_view.*.size);
assert(accessor.*.buffer_view.*.buffer.*.data != null);
const data_addr = @ptrCast([*]const u8, accessor.*.buffer_view.*.buffer.*.data) +
accessor.*.offset + accessor.*.buffer_view.*.offset;
if (attrib.*.type == c.cgltf_attribute_type_position) {
assert(accessor.*.type == c.cgltf_type_vec3);
assert(accessor.*.component_type == c.cgltf_component_type_r_32f);
@memcpy(
@ptrCast([*]u8, &positions.items[positions.items.len - num_vertices]),
data_addr,
accessor.*.count * accessor.*.stride,
);
} else if (attrib.*.type == c.cgltf_attribute_type_normal and normals != null) {
assert(accessor.*.type == c.cgltf_type_vec3);
assert(accessor.*.component_type == c.cgltf_component_type_r_32f);
@memcpy(
@ptrCast([*]u8, &normals.?.items[normals.?.items.len - num_vertices]),
data_addr,
accessor.*.count * accessor.*.stride,
);
} else if (attrib.*.type == c.cgltf_attribute_type_texcoord and texcoords0 != null) {
assert(accessor.*.type == c.cgltf_type_vec2);
assert(accessor.*.component_type == c.cgltf_component_type_r_32f);
@memcpy(
@ptrCast([*]u8, &texcoords0.?.items[texcoords0.?.items.len - num_vertices]),
data_addr,
accessor.*.count * accessor.*.stride,
);
} else if (attrib.*.type == c.cgltf_attribute_type_tangent and tangents != null) {
assert(accessor.*.type == c.cgltf_type_vec4);
assert(accessor.*.component_type == c.cgltf_component_type_r_32f);
@memcpy(
@ptrCast([*]u8, &tangents.?.items[tangents.?.items.len - num_vertices]),
data_addr,
accessor.*.count * accessor.*.stride,
);
}
}
}
}
fn loadScene(
arena: std.mem.Allocator,
gctx: *zd3d12.GraphicsContext,
all_meshes: *std.ArrayList(Mesh),
all_vertices: *std.ArrayList(Vertex),
all_indices: *std.ArrayList(u32),
all_materials: *std.ArrayList(Material),
all_textures: *std.ArrayList(ResourceView),
) void {
var indices = std.ArrayList(u32).init(arena);
var positions = std.ArrayList(Vec3).init(arena);
var normals = std.ArrayList(Vec3).init(arena);
var texcoords0 = std.ArrayList(Vec2).init(arena);
var tangents = std.ArrayList(Vec4).init(arena);
const data = parseAndLoadGltfFile(content_dir ++ "Sponza/Sponza.gltf");
defer c.cgltf_free(data);
const num_meshes = @intCast(u32, data.meshes_count);
var mesh_index: u32 = 0;
while (mesh_index < num_meshes) : (mesh_index += 1) {
const num_prims = @intCast(u32, data.meshes[mesh_index].primitives_count);
var prim_index: u32 = 0;
while (prim_index < num_prims) : (prim_index += 1) {
const pre_indices_len = indices.items.len;
const pre_positions_len = positions.items.len;
appendMeshPrimitive(data, mesh_index, prim_index, &indices, &positions, &normals, &texcoords0, &tangents);
const num_materials = @intCast(u32, data.materials_count);
var material_index: u32 = 0;
var assigned_material_index: u32 = 0xffff_ffff;
while (material_index < num_materials) : (material_index += 1) {
const prim = &data.meshes[mesh_index].primitives[prim_index];
if (prim.material == &data.materials[material_index]) {
assigned_material_index = material_index;
break;
}
}
assert(assigned_material_index != 0xffff_ffff);
all_meshes.append(.{
.index_offset = @intCast(u32, pre_indices_len),
.vertex_offset = @intCast(u32, pre_positions_len),
.num_indices = @intCast(u32, indices.items.len - pre_indices_len),
.num_vertices = @intCast(u32, positions.items.len - pre_positions_len),
.material_index = assigned_material_index,
}) catch unreachable;
}
}
all_indices.ensureTotalCapacity(indices.items.len) catch unreachable;
for (indices.items) |index| {
all_indices.appendAssumeCapacity(index);
}
all_vertices.ensureTotalCapacity(positions.items.len) catch unreachable;
for (positions.items) |_, index| {
all_vertices.appendAssumeCapacity(.{
.position = positions.items[index].scale(0.008), // NOTE(mziulek): Sponza requires scaling.
.normal = normals.items[index],
.texcoords0 = texcoords0.items[index],
.tangent = tangents.items[index],
});
}
const num_materials = @intCast(u32, data.materials_count);
var material_index: u32 = 0;
all_materials.ensureTotalCapacity(num_materials) catch unreachable;
while (material_index < num_materials) : (material_index += 1) {
const gltf_material = &data.materials[material_index];
assert(gltf_material.has_pbr_metallic_roughness == 1);
const mr = &gltf_material.pbr_metallic_roughness;
const num_images = @intCast(u32, data.images_count);
const invalid_image_index = num_images;
var base_color_tex_index: u32 = invalid_image_index;
var metallic_roughness_tex_index: u32 = invalid_image_index;
var normal_tex_index: u32 = invalid_image_index;
var image_index: u32 = 0;
while (image_index < num_images) : (image_index += 1) {
const image = &data.images[image_index];
assert(image.uri != null);
if (mr.base_color_texture.texture != null and
mr.base_color_texture.texture.*.image.*.uri == image.uri)
{
assert(base_color_tex_index == invalid_image_index);
base_color_tex_index = image_index;
}
if (mr.metallic_roughness_texture.texture != null and
mr.metallic_roughness_texture.texture.*.image.*.uri == image.uri)
{
assert(metallic_roughness_tex_index == invalid_image_index);
metallic_roughness_tex_index = image_index;
}
if (gltf_material.normal_texture.texture != null and
gltf_material.normal_texture.texture.*.image.*.uri == image.uri)
{
assert(normal_tex_index == invalid_image_index);
normal_tex_index = image_index;
}
}
assert(base_color_tex_index != invalid_image_index);
all_materials.appendAssumeCapacity(.{
.base_color = Vec3.init(mr.base_color_factor[0], mr.base_color_factor[1], mr.base_color_factor[2]),
.roughness = mr.roughness_factor,
.metallic = mr.metallic_factor,
.base_color_tex_index = @intCast(u16, base_color_tex_index),
.metallic_roughness_tex_index = @intCast(u16, metallic_roughness_tex_index),
.normal_tex_index = @intCast(u16, normal_tex_index),
});
}
const num_images = @intCast(u32, data.images_count);
var image_index: u32 = 0;
all_textures.ensureTotalCapacity(num_images + 1) catch unreachable;
while (image_index < num_images) : (image_index += 1) {
const image = &data.images[image_index];
var buffer: [64]u8 = undefined;
const path = std.fmt.bufPrint(
buffer[0..],
content_dir ++ "Sponza/{s}",
.{image.uri},
) catch unreachable;
const texture = gctx.createAndUploadTex2dFromFile(path, .{}) catch unreachable;
const view = gctx.allocateCpuDescriptors(.CBV_SRV_UAV, 1);
gctx.device.CreateShaderResourceView(gctx.lookupResource(texture).?, null, view);
all_textures.appendAssumeCapacity(.{ .resource = texture, .view = view });
}
const texture_4x4 = ResourceView{
.resource = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&d3d12.RESOURCE_DESC.initTex2d(.R8G8B8A8_UNORM, 4, 4, 0),
d3d12.RESOURCE_STATE_PIXEL_SHADER_RESOURCE,
null,
) catch |err| hrPanic(err),
.view = gctx.allocateCpuDescriptors(.CBV_SRV_UAV, 1),
};
gctx.device.CreateShaderResourceView(gctx.lookupResource(texture_4x4.resource).?, null, texture_4x4.view);
all_textures.appendAssumeCapacity(texture_4x4);
}
fn init(allocator: std.mem.Allocator) !DemoState {
const window = try common.initWindow(allocator, window_name, window_width, window_height);
var arena_allocator_state = std.heap.ArenaAllocator.init(allocator);
defer arena_allocator_state.deinit();
const arena_allocator = arena_allocator_state.allocator();
_ = zpix.loadGpuCapturerLibrary();
_ = zpix.setTargetWindow(window);
_ = zpix.beginCapture(
zpix.CAPTURE_GPU,
&zpix.CaptureParameters{ .gpu_capture_params = .{ .FileName = L("capture.wpix") } },
);
var gctx = zd3d12.GraphicsContext.init(allocator, window);
// Check for DirectX Raytracing (DXR) support.
const dxr_is_supported = blk: {
var options5: d3d12.FEATURE_DATA_D3D12_OPTIONS5 = undefined;
const res = gctx.device.CheckFeatureSupport(.OPTIONS5, &options5, @sizeOf(d3d12.FEATURE_DATA_D3D12_OPTIONS5));
// Check the HRESULT first: `options5` is left undefined when the call fails.
break :blk res == w32.S_OK and options5.RaytracingTier != .NOT_SUPPORTED;
};
const dxr_draw_mode = @boolToInt(dxr_is_supported);
const static_mesh_pso = blk: {
var pso_desc = d3d12.GRAPHICS_PIPELINE_STATE_DESC.initDefault();
pso_desc.RTVFormats[0] = .R8G8B8A8_UNORM;
pso_desc.NumRenderTargets = 1;
pso_desc.BlendState.RenderTarget[0].RenderTargetWriteMask = 0xf;
pso_desc.DSVFormat = .D32_FLOAT;
pso_desc.DepthStencilState.DepthFunc = .LESS_EQUAL;
pso_desc.PrimitiveTopologyType = .TRIANGLE;
break :blk gctx.createGraphicsShaderPipeline(
arena_allocator,
&pso_desc,
content_dir ++ "shaders/rast_static_mesh.vs.cso",
content_dir ++ "shaders/rast_static_mesh.ps.cso",
);
};
const z_pre_pass_pso = blk: {
var pso_desc = d3d12.GRAPHICS_PIPELINE_STATE_DESC.initDefault();
pso_desc.RTVFormats[0] = .UNKNOWN;
pso_desc.NumRenderTargets = 0;
pso_desc.BlendState.RenderTarget[0].RenderTargetWriteMask = 0x0;
pso_desc.DSVFormat = .D32_FLOAT;
pso_desc.PrimitiveTopologyType = .TRIANGLE;
break :blk gctx.createGraphicsShaderPipeline(
arena_allocator,
&pso_desc,
content_dir ++ "shaders/z_pre_pass.vs.cso",
content_dir ++ "shaders/z_pre_pass.ps.cso",
);
};
const gen_shadow_rays_pso = blk: {
var pso_desc = d3d12.GRAPHICS_PIPELINE_STATE_DESC.initDefault();
pso_desc.RTVFormats[0] = .R32G32B32A32_FLOAT;
pso_desc.NumRenderTargets = 1;
pso_desc.BlendState.RenderTarget[0].RenderTargetWriteMask = 0xf;
pso_desc.DSVFormat = .D32_FLOAT;
pso_desc.DepthStencilState.DepthWriteMask = .ZERO;
pso_desc.DepthStencilState.DepthFunc = .LESS_EQUAL;
pso_desc.PrimitiveTopologyType = .TRIANGLE;
break :blk gctx.createGraphicsShaderPipeline(
arena_allocator,
&pso_desc,
content_dir ++ "shaders/gen_shadow_rays.vs.cso",
content_dir ++ "shaders/gen_shadow_rays.ps.cso",
);
};
// Create 'trace shadow rays' RT state object.
var trace_shadow_rays_stateobj: ?*d3d12.IStateObject = null;
var trace_shadow_rays_rs: ?*d3d12.IRootSignature = null;
if (dxr_is_supported) {
const cso_file = std.fs.cwd().openFile(
content_dir ++ "shaders/trace_shadow_rays.lib.cso",
.{},
) catch unreachable;
defer cso_file.close();
const cso_code = cso_file.reader().readAllAlloc(arena_allocator, 256 * 1024) catch unreachable;
const lib_desc = d3d12.DXIL_LIBRARY_DESC{
.DXILLibrary = .{ .pShaderBytecode = cso_code.ptr, .BytecodeLength = cso_code.len },
.NumExports = 0,
.pExports = null,
};
const subobject = d3d12.STATE_SUBOBJECT{
.Type = .DXIL_LIBRARY,
.pDesc = &lib_desc,
};
const state_object_desc = d3d12.STATE_OBJECT_DESC{
.Type = .RAYTRACING_PIPELINE,
.NumSubobjects = 1,
.pSubobjects = @ptrCast([*]const d3d12.STATE_SUBOBJECT, &subobject),
};
hrPanicOnFail(gctx.device.CreateStateObject(
&state_object_desc,
&d3d12.IID_IStateObject,
@ptrCast(*?*anyopaque, &trace_shadow_rays_stateobj),
));
hrPanicOnFail(gctx.device.CreateRootSignature(
0,
cso_code.ptr,
cso_code.len,
&d3d12.IID_IRootSignature,
@ptrCast(*?*anyopaque, &trace_shadow_rays_rs),
));
}
const trace_shadow_rays_table = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&d3d12.RESOURCE_DESC.initBuffer(64 * 1024),
d3d12.RESOURCE_STATE_COPY_DEST,
null,
) catch |err| hrPanic(err);
const depth_texture = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&blk: {
var desc = d3d12.RESOURCE_DESC.initTex2d(.D32_FLOAT, gctx.viewport_width, gctx.viewport_height, 1);
desc.Flags = d3d12.RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
break :blk desc;
},
d3d12.RESOURCE_STATE_DEPTH_WRITE,
&d3d12.CLEAR_VALUE.initDepthStencil(.D32_FLOAT, 1.0, 0),
) catch |err| hrPanic(err);
const depth_texture_dsv = gctx.allocateCpuDescriptors(.DSV, 1);
const depth_texture_srv = gctx.allocateCpuDescriptors(.CBV_SRV_UAV, 1);
gctx.device.CreateDepthStencilView(gctx.lookupResource(depth_texture).?, null, depth_texture_dsv);
gctx.device.CreateShaderResourceView(
gctx.lookupResource(depth_texture).?,
&d3d12.SHADER_RESOURCE_VIEW_DESC{
.Format = .R32_FLOAT,
.ViewDimension = .TEXTURE2D,
.Shader4ComponentMapping = d3d12.DEFAULT_SHADER_4_COMPONENT_MAPPING,
.u = .{
.Texture2D = .{
.MostDetailedMip = 0,
.MipLevels = 1,
.PlaneSlice = 0,
.ResourceMinLODClamp = 0.0,
},
},
},
depth_texture_srv,
);
const shadow_rays_texture = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&blk: {
var desc = d3d12.RESOURCE_DESC.initTex2d(
.R32G32B32A32_FLOAT,
gctx.viewport_width,
gctx.viewport_height,
1,
);
desc.Flags = d3d12.RESOURCE_FLAG_ALLOW_RENDER_TARGET;
break :blk desc;
},
d3d12.RESOURCE_STATE_RENDER_TARGET,
&d3d12.CLEAR_VALUE.initColor(.R32G32B32A32_FLOAT, &.{ 0.0, 0.0, 0.0, 0.0 }),
) catch |err| hrPanic(err);
const shadow_rays_texture_rtv = gctx.allocateCpuDescriptors(.RTV, 1);
const shadow_rays_texture_srv = gctx.allocateCpuDescriptors(.CBV_SRV_UAV, 1);
gctx.device.CreateRenderTargetView(
gctx.lookupResource(shadow_rays_texture).?,
null,
shadow_rays_texture_rtv,
);
gctx.device.CreateShaderResourceView(
gctx.lookupResource(shadow_rays_texture).?,
null,
shadow_rays_texture_srv,
);
const shadow_mask_texture = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&blk: {
var desc = d3d12.RESOURCE_DESC.initTex2d(.R32_FLOAT, gctx.viewport_width, gctx.viewport_height, 1);
desc.Flags = d3d12.RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
break :blk desc;
},
d3d12.RESOURCE_STATE_UNORDERED_ACCESS,
null,
) catch |err| hrPanic(err);
const shadow_mask_texture_uav = gctx.allocateCpuDescriptors(.CBV_SRV_UAV, 1);
const shadow_mask_texture_srv = gctx.allocateCpuDescriptors(.CBV_SRV_UAV, 1);
gctx.device.CreateUnorderedAccessView(
gctx.lookupResource(shadow_mask_texture).?,
null,
null,
shadow_mask_texture_uav,
);
gctx.device.CreateShaderResourceView(
gctx.lookupResource(shadow_mask_texture).?,
null,
shadow_mask_texture_srv,
);
var mipgen_rgba8 = zd3d12.MipmapGenerator.init(arena_allocator, &gctx, .R8G8B8A8_UNORM, content_dir);
//
// Begin frame to init/upload resources on the GPU.
//
gctx.beginFrame();
var guir = GuiRenderer.init(arena_allocator, &gctx, 1, content_dir);
var all_meshes = std.ArrayList(Mesh).init(allocator);
var all_vertices = std.ArrayList(Vertex).init(arena_allocator);
var all_indices = std.ArrayList(u32).init(arena_allocator);
var all_materials = std.ArrayList(Material).init(allocator);
var all_textures = std.ArrayList(ResourceView).init(allocator);
loadScene(
arena_allocator,
&gctx,
&all_meshes,
&all_vertices,
&all_indices,
&all_materials,
&all_textures,
);
for (all_textures.items) |texture| {
mipgen_rgba8.generateMipmaps(&gctx, texture.resource);
gctx.addTransitionBarrier(texture.resource, d3d12.RESOURCE_STATE_PIXEL_SHADER_RESOURCE);
}
gctx.flushResourceBarriers();
const vertex_buffer = .{
.resource = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&d3d12.RESOURCE_DESC.initBuffer(all_vertices.items.len * @sizeOf(Vertex)),
d3d12.RESOURCE_STATE_COPY_DEST,
null,
) catch |err| hrPanic(err),
.view = gctx.allocateCpuDescriptors(.CBV_SRV_UAV, 1),
};
gctx.device.CreateShaderResourceView(
gctx.lookupResource(vertex_buffer.resource).?,
&d3d12.SHADER_RESOURCE_VIEW_DESC.initStructuredBuffer(
0,
@intCast(u32, all_vertices.items.len),
@sizeOf(Vertex),
),
vertex_buffer.view,
);
const index_buffer = .{
.resource = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&d3d12.RESOURCE_DESC.initBuffer(all_indices.items.len * @sizeOf(u32)),
d3d12.RESOURCE_STATE_COPY_DEST,
null,
) catch |err| hrPanic(err),
.view = gctx.allocateCpuDescriptors(.CBV_SRV_UAV, 1),
};
gctx.device.CreateShaderResourceView(
gctx.lookupResource(index_buffer.resource).?,
&d3d12.SHADER_RESOURCE_VIEW_DESC.initTypedBuffer(.R32_UINT, 0, @intCast(u32, all_indices.items.len)),
index_buffer.view,
);
// Upload vertex buffer.
{
const upload = gctx.allocateUploadBufferRegion(Vertex, @intCast(u32, all_vertices.items.len));
for (all_vertices.items) |vertex, i| {
upload.cpu_slice[i] = vertex;
}
gctx.cmdlist.CopyBufferRegion(
gctx.lookupResource(vertex_buffer.resource).?,
0,
upload.buffer,
upload.buffer_offset,
upload.cpu_slice.len * @sizeOf(@TypeOf(upload.cpu_slice[0])),
);
gctx.addTransitionBarrier(vertex_buffer.resource, d3d12.RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
gctx.flushResourceBarriers();
}
// Upload index buffer.
{
const upload = gctx.allocateUploadBufferRegion(u32, @intCast(u32, all_indices.items.len));
for (all_indices.items) |index, i| {
upload.cpu_slice[i] = index;
}
gctx.cmdlist.CopyBufferRegion(
gctx.lookupResource(index_buffer.resource).?,
0,
upload.buffer,
upload.buffer_offset,
upload.cpu_slice.len * @sizeOf(@TypeOf(upload.cpu_slice[0])),
);
gctx.addTransitionBarrier(index_buffer.resource, d3d12.RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
gctx.flushResourceBarriers();
}
var temp_resources = std.ArrayList(zd3d12.ResourceHandle).init(arena_allocator);
// Create "Bottom Level Acceleration Structure" (blas).
const blas_buffer = if (dxr_is_supported) blk_blas: {
var geometry_descs = std.ArrayList(d3d12.RAYTRACING_GEOMETRY_DESC).initCapacity(
arena_allocator,
all_meshes.items.len,
) catch unreachable;
const vertex_buffer_addr = gctx.lookupResource(vertex_buffer.resource).?.GetGPUVirtualAddress();
const index_buffer_addr = gctx.lookupResource(index_buffer.resource).?.GetGPUVirtualAddress();
for (all_meshes.items) |mesh| {
const desc = d3d12.RAYTRACING_GEOMETRY_DESC{
.Flags = d3d12.RAYTRACING_GEOMETRY_FLAG_OPAQUE,
.Type = .TRIANGLES,
.u = .{
.Triangles = .{
.Transform3x4 = 0,
.IndexFormat = .R32_UINT,
.VertexFormat = .R32G32B32_FLOAT,
.IndexCount = mesh.num_indices,
.VertexCount = mesh.num_vertices,
.IndexBuffer = index_buffer_addr + mesh.index_offset * @sizeOf(u32),
.VertexBuffer = .{
.StrideInBytes = @sizeOf(Vertex),
.StartAddress = vertex_buffer_addr + mesh.vertex_offset * @sizeOf(Vertex),
},
},
},
};
geometry_descs.appendAssumeCapacity(desc);
}
const blas_inputs = d3d12.BUILD_RAYTRACING_ACCELERATION_STRUCTURE_INPUTS{
.Type = .BOTTOM_LEVEL,
.Flags = d3d12.RAYTRACING_ACCELERATION_STRUCTURE_BUILD_FLAG_PREFER_FAST_TRACE,
.NumDescs = @intCast(u32, geometry_descs.items.len),
.DescsLayout = .ARRAY,
.u = .{
.pGeometryDescs = geometry_descs.items.ptr,
},
};
var blas_build_info: d3d12.RAYTRACING_ACCELERATION_STRUCTURE_PREBUILD_INFO = undefined;
gctx.device.GetRaytracingAccelerationStructurePrebuildInfo(&blas_inputs, &blas_build_info);
std.log.info("BLAS: {}", .{blas_build_info});
const blas_scratch_buffer = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&blk: {
var desc = d3d12.RESOURCE_DESC.initBuffer(blas_build_info.ScratchDataSizeInBytes);
desc.Flags = d3d12.RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
break :blk desc;
},
d3d12.RESOURCE_STATE_UNORDERED_ACCESS,
null,
) catch |err| hrPanic(err);
temp_resources.append(blas_scratch_buffer) catch unreachable;
const blas_buffer = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&blk: {
var desc = d3d12.RESOURCE_DESC.initBuffer(blas_build_info.ResultDataMaxSizeInBytes);
desc.Flags = d3d12.RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
break :blk desc;
},
d3d12.RESOURCE_STATE_RAYTRACING_ACCELERATION_STRUCTURE,
null,
) catch |err| hrPanic(err);
const blas_desc = d3d12.BUILD_RAYTRACING_ACCELERATION_STRUCTURE_DESC{
.DestAccelerationStructureData = gctx.lookupResource(blas_buffer).?.GetGPUVirtualAddress(),
.Inputs = blas_inputs,
.SourceAccelerationStructureData = 0,
.ScratchAccelerationStructureData = gctx.lookupResource(blas_scratch_buffer).?.GetGPUVirtualAddress(),
};
gctx.cmdlist.BuildRaytracingAccelerationStructure(&blas_desc, 0, null);
gctx.cmdlist.ResourceBarrier(
1,
&[_]d3d12.RESOURCE_BARRIER{
d3d12.RESOURCE_BARRIER.initUav(gctx.lookupResource(blas_buffer).?),
},
);
break :blk_blas blas_buffer;
} else blk_blas: {
// DXR is not supported. Create a dummy BLAS buffer to simplify code.
break :blk_blas gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&d3d12.RESOURCE_DESC.initBuffer(1),
d3d12.RESOURCE_STATE_COMMON,
null,
) catch |err| hrPanic(err);
};
// Create "Top Level Acceleration Structure" (tlas).
const tlas_buffer = if (dxr_is_supported) blk_tlas: {
const instance_desc = d3d12.RAYTRACING_INSTANCE_DESC{
.Transform = [3][4]f32{
[4]f32{ 1.0, 0.0, 0.0, 0.0 },
[4]f32{ 0.0, 1.0, 0.0, 0.0 },
[4]f32{ 0.0, 0.0, 1.0, 0.0 },
},
.InstanceID = 0,
.InstanceMask = 1,
.InstanceContributionToHitGroupIndex = 0,
.Flags = 0,
.AccelerationStructure = gctx.lookupResource(blas_buffer).?.GetGPUVirtualAddress(),
};
const instance_buffer = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&d3d12.RESOURCE_DESC.initBuffer(@sizeOf(d3d12.RAYTRACING_INSTANCE_DESC)),
d3d12.RESOURCE_STATE_COPY_DEST,
null,
) catch |err| hrPanic(err);
temp_resources.append(instance_buffer) catch unreachable;
// Upload instance desc to instance buffer.
{
const upload = gctx.allocateUploadBufferRegion(d3d12.RAYTRACING_INSTANCE_DESC, 1);
upload.cpu_slice[0] = instance_desc;
gctx.cmdlist.CopyBufferRegion(
gctx.lookupResource(instance_buffer).?,
0,
upload.buffer,
upload.buffer_offset,
upload.cpu_slice.len * @sizeOf(@TypeOf(upload.cpu_slice[0])),
);
gctx.addTransitionBarrier(instance_buffer, d3d12.RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
gctx.flushResourceBarriers();
}
const tlas_inputs = d3d12.BUILD_RAYTRACING_ACCELERATION_STRUCTURE_INPUTS{
.Type = .TOP_LEVEL,
.Flags = d3d12.RAYTRACING_ACCELERATION_STRUCTURE_BUILD_FLAG_PREFER_FAST_TRACE,
.NumDescs = 1,
.DescsLayout = .ARRAY,
.u = .{
.InstanceDescs = gctx.lookupResource(instance_buffer).?.GetGPUVirtualAddress(),
},
};
var tlas_build_info: d3d12.RAYTRACING_ACCELERATION_STRUCTURE_PREBUILD_INFO = undefined;
gctx.device.GetRaytracingAccelerationStructurePrebuildInfo(&tlas_inputs, &tlas_build_info);
std.log.info("TLAS: {}", .{tlas_build_info});
const tlas_scratch_buffer = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&blk: {
var desc = d3d12.RESOURCE_DESC.initBuffer(tlas_build_info.ScratchDataSizeInBytes);
desc.Flags = d3d12.RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
break :blk desc;
},
d3d12.RESOURCE_STATE_UNORDERED_ACCESS,
null,
) catch |err| hrPanic(err);
temp_resources.append(tlas_scratch_buffer) catch unreachable;
const tlas_buffer = gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&blk: {
var desc = d3d12.RESOURCE_DESC.initBuffer(tlas_build_info.ResultDataMaxSizeInBytes);
desc.Flags = d3d12.RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
break :blk desc;
},
d3d12.RESOURCE_STATE_RAYTRACING_ACCELERATION_STRUCTURE,
null,
) catch |err| hrPanic(err);
const tlas_desc = d3d12.BUILD_RAYTRACING_ACCELERATION_STRUCTURE_DESC{
.DestAccelerationStructureData = gctx.lookupResource(tlas_buffer).?.GetGPUVirtualAddress(),
.Inputs = tlas_inputs,
.SourceAccelerationStructureData = 0,
.ScratchAccelerationStructureData = gctx.lookupResource(tlas_scratch_buffer).?.GetGPUVirtualAddress(),
};
gctx.cmdlist.BuildRaytracingAccelerationStructure(&tlas_desc, 0, null);
gctx.cmdlist.ResourceBarrier(
1,
&[_]d3d12.RESOURCE_BARRIER{
d3d12.RESOURCE_BARRIER.initUav(gctx.lookupResource(tlas_buffer).?),
},
);
break :blk_tlas tlas_buffer;
} else blk_tlas: {
// DXR is not supported. Create a dummy TLAS buffer to simplify code.
break :blk_tlas gctx.createCommittedResource(
.DEFAULT,
d3d12.HEAP_FLAG_NONE,
&d3d12.RESOURCE_DESC.initBuffer(1),
d3d12.RESOURCE_STATE_COMMON,
null,
) catch |err| hrPanic(err);
};
gctx.endFrame();
gctx.finishGpuCommands();
_ = zpix.endCapture();
mipgen_rgba8.deinit(&gctx);
for (temp_resources.items) |resource| {
gctx.destroyResource(resource);
}
return DemoState{
.gctx = gctx,
.guir = guir,
.frame_stats = common.FrameStats.init(),
.static_mesh_pso = static_mesh_pso,
.z_pre_pass_pso = z_pre_pass_pso,
.gen_shadow_rays_pso = gen_shadow_rays_pso,
.trace_shadow_rays_stateobj = trace_shadow_rays_stateobj,
.trace_shadow_rays_rs = trace_shadow_rays_rs,
.trace_shadow_rays_table = trace_shadow_rays_table,
.shadow_rays_texture = shadow_rays_texture,
.shadow_rays_texture_rtv = shadow_rays_texture_rtv,
.shadow_rays_texture_srv = shadow_rays_texture_srv,
.shadow_mask_texture = shadow_mask_texture,
.shadow_mask_texture_uav = shadow_mask_texture_uav,
.shadow_mask_texture_srv = shadow_mask_texture_srv,
.meshes = all_meshes,
.materials = all_materials,
.textures = all_textures,
.depth_texture = depth_texture,
.depth_texture_dsv = depth_texture_dsv,
.depth_texture_srv = depth_texture_srv,
.vertex_buffer = vertex_buffer,
.index_buffer = index_buffer,
.blas_buffer = blas_buffer,
.tlas_buffer = tlas_buffer,
.camera = .{
.position = Vec3.init(0.0, 1.0, 0.0),
.forward = Vec3.initZero(),
.pitch = 0.0,
.yaw = math.pi + 0.25 * math.pi,
},
.mouse = .{
.cursor_prev_x = 0,
.cursor_prev_y = 0,
},
.light_position = Vec3.init(0.0, 5.0, 0.0),
.dxr_is_supported = dxr_is_supported,
.dxr_draw_mode = dxr_draw_mode,
};
}
fn deinit(demo: *DemoState, allocator: std.mem.Allocator) void {
demo.gctx.finishGpuCommands();
if (demo.dxr_is_supported) {
_ = demo.trace_shadow_rays_stateobj.?.Release();
_ = demo.trace_shadow_rays_rs.?.Release();
}
demo.meshes.deinit();
demo.materials.deinit();
demo.textures.deinit();
demo.guir.deinit(&demo.gctx);
demo.gctx.deinit(allocator);
common.deinitWindow(allocator);
demo.* = undefined;
}
fn update(demo: *DemoState) void {
demo.frame_stats.update(demo.gctx.window, window_name);
common.newImGuiFrame(demo.frame_stats.delta_time);
c.igSetNextWindowPos(
c.ImVec2{ .x = @intToFloat(f32, demo.gctx.viewport_width) - 600.0 - 20, .y = 20.0 },
c.ImGuiCond_FirstUseEver,
c.ImVec2{ .x = 0.0, .y = 0.0 },
);
c.igSetNextWindowSize(c.ImVec2{ .x = 600.0, .y = 0.0 }, c.ImGuiCond_FirstUseEver);
_ = c.igBegin(
"Demo Settings",
null,
c.ImGuiWindowFlags_NoMove | c.ImGuiWindowFlags_NoResize | c.ImGuiWindowFlags_NoSavedSettings,
);
if (demo.dxr_is_supported) {
c.igTextColored(
c.ImVec4{ .x = 0.0, .y = 0.75, .z = 0.0, .w = 1.0 },
"DirectX Raytracing (DXR) is supported.",
"",
);
} else {
c.igTextColored(
c.ImVec4{ .x = 0.75, .y = 0.0, .z = 0.0, .w = 1.0 },
"DirectX Raytracing (DXR) is NOT supported.",
"",
);
}
c.igBeginDisabled(!demo.dxr_is_supported);
_ = c.igRadioButton_IntPtr("No Shadows", &demo.dxr_draw_mode, 0);
_ = c.igRadioButton_IntPtr("Shadows", &demo.dxr_draw_mode, 1);
_ = c.igRadioButton_IntPtr("Shadow Mask", &demo.dxr_draw_mode, 2);
c.igEndDisabled();
c.igEnd();
// Handle camera rotation with mouse.
{
var pos: w32.POINT = undefined;
_ = w32.GetCursorPos(&pos);
const delta_x = @intToFloat(f32, pos.x) - @intToFloat(f32, demo.mouse.cursor_prev_x);
const delta_y = @intToFloat(f32, pos.y) - @intToFloat(f32, demo.mouse.cursor_prev_y);
demo.mouse.cursor_prev_x = pos.x;
demo.mouse.cursor_prev_y = pos.y;
if (w32.GetAsyncKeyState(w32.VK_RBUTTON) < 0) {
demo.camera.pitch += 0.0025 * delta_y;
demo.camera.yaw += 0.0025 * delta_x;
demo.camera.pitch = math.min(demo.camera.pitch, 0.48 * math.pi);
demo.camera.pitch = math.max(demo.camera.pitch, -0.48 * math.pi);
demo.camera.yaw = vm.modAngle(demo.camera.yaw);
}
}
// Handle camera movement with 'WASD' keys.
{
const speed: f32 = 5.0;
const delta_time = demo.frame_stats.delta_time;
const transform = Mat4.initRotationX(demo.camera.pitch).mul(Mat4.initRotationY(demo.camera.yaw));
var forward = Vec3.init(0.0, 0.0, 1.0).transform(transform).normalize();
demo.camera.forward = forward;
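        // Left-handed basis: right = world up cross view forward.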
const right = Vec3.init(0.0, 1.0, 0.0).cross(forward).normalize().scale(speed * delta_time);
forward = forward.scale(speed * delta_time);
if (w32.GetAsyncKeyState('W') < 0) {
demo.camera.position = demo.camera.position.add(forward);
} else if (w32.GetAsyncKeyState('S') < 0) {
demo.camera.position = demo.camera.position.sub(forward);
}
if (w32.GetAsyncKeyState('D') < 0) {
demo.camera.position = demo.camera.position.add(right);
} else if (w32.GetAsyncKeyState('A') < 0) {
demo.camera.position = demo.camera.position.sub(right);
}
}
demo.light_position.c[0] = @floatCast(f32, 0.5 * @sin(0.25 * demo.frame_stats.time));
}
fn draw(demo: *DemoState) void {
var gctx = &demo.gctx;
gctx.beginFrame();
const cam_world_to_view = vm.Mat4.initLookToLh(
demo.camera.position,
demo.camera.forward,
vm.Vec3.init(0.0, 1.0, 0.0),
);
const cam_view_to_clip = vm.Mat4.initPerspectiveFovLh(
math.pi / 3.0,
@intToFloat(f32, gctx.viewport_width) / @intToFloat(f32, gctx.viewport_height),
0.1,
50.0,
);
const cam_world_to_clip = cam_world_to_view.mul(cam_view_to_clip);
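    // Row-vector convention: world_to_clip = world_to_view * view_to_clip;
    // matrices are transposed before upload to match HLSL's column-major default.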
const back_buffer = gctx.getBackBuffer();
gctx.addTransitionBarrier(back_buffer.resource_handle, d3d12.RESOURCE_STATE_RENDER_TARGET);
gctx.flushResourceBarriers();
gctx.cmdlist.OMSetRenderTargets(
1,
&[_]d3d12.CPU_DESCRIPTOR_HANDLE{back_buffer.descriptor_handle},
w32.TRUE,
&demo.depth_texture_dsv,
);
gctx.cmdlist.ClearRenderTargetView(
back_buffer.descriptor_handle,
&[4]f32{ 0.0, 0.0, 0.0, 1.0 },
0,
null,
);
gctx.cmdlist.ClearDepthStencilView(demo.depth_texture_dsv, d3d12.CLEAR_FLAG_DEPTH, 1.0, 0, 0, null);
gctx.cmdlist.IASetPrimitiveTopology(.TRIANGLELIST);
// Z Pre Pass.
{
zpix.beginEvent(gctx.cmdlist, "Z Pre Pass");
defer zpix.endEvent(gctx.cmdlist);
const object_to_clip = cam_world_to_clip;
const mem = gctx.allocateUploadMemory(PsoZPrePass_FrameConst, 1);
mem.cpu_slice[0] = .{
.object_to_clip = object_to_clip.transpose(),
};
gctx.setCurrentPipeline(demo.z_pre_pass_pso);
gctx.cmdlist.SetGraphicsRootConstantBufferView(1, mem.gpu_base);
gctx.cmdlist.SetGraphicsRootDescriptorTable(2, blk: {
const table = gctx.copyDescriptorsToGpuHeap(1, demo.vertex_buffer.view);
_ = gctx.copyDescriptorsToGpuHeap(1, demo.index_buffer.view);
break :blk table;
});
for (demo.meshes.items) |mesh| {
gctx.cmdlist.SetGraphicsRoot32BitConstants(0, 2, &.{ mesh.vertex_offset, mesh.index_offset }, 0);
gctx.cmdlist.DrawInstanced(mesh.num_indices, 1, 0, 0);
}
}
gctx.addTransitionBarrier(demo.shadow_rays_texture, d3d12.RESOURCE_STATE_RENDER_TARGET);
gctx.flushResourceBarriers();
// Generate shadow rays.
if (demo.dxr_is_supported and demo.dxr_draw_mode > 0) {
zpix.beginEvent(gctx.cmdlist, "Generate shadow rays.");
defer zpix.endEvent(gctx.cmdlist);
gctx.cmdlist.OMSetRenderTargets(
1,
&[_]d3d12.CPU_DESCRIPTOR_HANDLE{demo.shadow_rays_texture_rtv},
w32.TRUE,
&demo.depth_texture_dsv,
);
gctx.cmdlist.ClearRenderTargetView(
demo.shadow_rays_texture_rtv,
&[4]f32{ 0.0, 0.0, 0.0, 0.0 },
0,
null,
);
const object_to_clip = cam_world_to_clip;
const mem = gctx.allocateUploadMemory(PsoGenShadowRays_FrameConst, 1);
mem.cpu_slice[0] = .{
.object_to_clip = object_to_clip.transpose(),
.object_to_world = Mat4.initIdentity(),
};
gctx.setCurrentPipeline(demo.gen_shadow_rays_pso);
gctx.cmdlist.SetGraphicsRootConstantBufferView(1, mem.gpu_base);
gctx.cmdlist.SetGraphicsRootDescriptorTable(2, blk: {
const table = gctx.copyDescriptorsToGpuHeap(1, demo.vertex_buffer.view);
_ = gctx.copyDescriptorsToGpuHeap(1, demo.index_buffer.view);
break :blk table;
});
for (demo.meshes.items) |mesh| {
gctx.cmdlist.SetGraphicsRoot32BitConstants(0, 2, &.{ mesh.vertex_offset, mesh.index_offset }, 0);
gctx.cmdlist.DrawInstanced(mesh.num_indices, 1, 0, 0);
}
gctx.cmdlist.OMSetRenderTargets(
1,
&[_]d3d12.CPU_DESCRIPTOR_HANDLE{back_buffer.descriptor_handle},
w32.TRUE,
&demo.depth_texture_dsv,
);
}
gctx.addTransitionBarrier(demo.shadow_rays_texture, d3d12.RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
gctx.addTransitionBarrier(demo.shadow_mask_texture, d3d12.RESOURCE_STATE_UNORDERED_ACCESS);
gctx.flushResourceBarriers();
// Trace shadow rays.
if (demo.dxr_is_supported and demo.dxr_draw_mode > 0) {
zpix.beginEvent(gctx.cmdlist, "Trace Shadow Rays");
defer zpix.endEvent(gctx.cmdlist);
        // Upload the shader table contents (in this demo this needs to be done only once, at init time).
{
gctx.addTransitionBarrier(demo.trace_shadow_rays_table, d3d12.RESOURCE_STATE_COPY_DEST);
gctx.flushResourceBarriers();
const total_table_size = 192;
const upload = gctx.allocateUploadBufferRegion(u8, total_table_size);
var properties: *d3d12.IStateObjectProperties = undefined;
hrPanicOnFail(demo.trace_shadow_rays_stateobj.?.QueryInterface(
&d3d12.IID_IStateObjectProperties,
@ptrCast(*?*anyopaque, &properties),
));
defer _ = properties.Release();
// ----------------------------------------------------------------------------------
// | raygen (32 B) | 0 (32 B) | miss (32 B) | 0 (32 B) | hitgroup (32 B) | 0 (32 B) |
// ----------------------------------------------------------------------------------
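            // Each record is a 32 B shader identifier padded with 32 B of zeros, so
            // every subtable below starts on a 64 B boundary (base + 0, +64, +128),
            // as required for shader table start addresses.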
@memcpy(
upload.cpu_slice.ptr,
@ptrCast([*]const u8, properties.GetShaderIdentifier(L("generateShadowRay"))),
32,
);
@memset(upload.cpu_slice.ptr + 32, 0, 32);
@memcpy(
upload.cpu_slice.ptr + 64,
@ptrCast([*]const u8, properties.GetShaderIdentifier(L("shadowMiss"))),
32,
);
@memset(upload.cpu_slice.ptr + 64 + 32, 0, 32);
@memcpy(
upload.cpu_slice.ptr + 2 * 64,
@ptrCast([*]const u8, properties.GetShaderIdentifier(L("g_shadow_hit_group"))),
32,
);
@memset(upload.cpu_slice.ptr + 2 * 64 + 32, 0, 32);
gctx.cmdlist.CopyBufferRegion(
gctx.lookupResource(demo.trace_shadow_rays_table).?,
0,
upload.buffer,
upload.buffer_offset,
total_table_size,
);
gctx.addTransitionBarrier(demo.trace_shadow_rays_table, d3d12.RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
gctx.flushResourceBarriers();
}
const mem = gctx.allocateUploadMemory(PsoTraceShadowRays_FrameConst, 1);
mem.cpu_slice[0] = .{
.light_position = demo.light_position,
};
gctx.cmdlist.SetPipelineState1(demo.trace_shadow_rays_stateobj.?);
gctx.cmdlist.SetComputeRootSignature(demo.trace_shadow_rays_rs.?);
gctx.cmdlist.SetComputeRootShaderResourceView(
0,
gctx.lookupResource(demo.tlas_buffer).?.GetGPUVirtualAddress(),
);
gctx.cmdlist.SetComputeRootDescriptorTable(1, blk: {
const table = gctx.copyDescriptorsToGpuHeap(1, demo.shadow_rays_texture_srv);
_ = gctx.copyDescriptorsToGpuHeap(1, demo.shadow_mask_texture_uav);
break :blk table;
});
gctx.cmdlist.SetComputeRootConstantBufferView(2, mem.gpu_base);
const base_addr = gctx.lookupResource(demo.trace_shadow_rays_table).?.GetGPUVirtualAddress();
const dispatch_desc = d3d12.DISPATCH_RAYS_DESC{
.RayGenerationShaderRecord = .{ .StartAddress = base_addr, .SizeInBytes = 32 },
.MissShaderTable = .{ .StartAddress = base_addr + 64, .SizeInBytes = 32, .StrideInBytes = 32 },
.HitGroupTable = .{ .StartAddress = base_addr + 128, .SizeInBytes = 32, .StrideInBytes = 32 },
.CallableShaderTable = .{ .StartAddress = 0, .SizeInBytes = 0, .StrideInBytes = 0 },
.Width = gctx.viewport_width,
.Height = gctx.viewport_height,
.Depth = 1,
};
gctx.cmdlist.DispatchRays(&dispatch_desc);
} else {
const gpu_view = gctx.copyDescriptorsToGpuHeap(1, demo.shadow_mask_texture_uav);
gctx.cmdlist.ClearUnorderedAccessViewFloat(
gpu_view,
demo.shadow_mask_texture_uav,
gctx.lookupResource(demo.shadow_mask_texture).?,
&.{ 1000.0, 0.0, 0.0, 0.0 },
0,
null,
);
}
gctx.addTransitionBarrier(demo.shadow_mask_texture, d3d12.RESOURCE_STATE_PIXEL_SHADER_RESOURCE);
gctx.flushResourceBarriers();
// Draw Sponza.
{
zpix.beginEvent(gctx.cmdlist, "Main Pass");
defer zpix.endEvent(gctx.cmdlist);
const object_to_world = vm.Mat4.initIdentity();
const object_to_clip = object_to_world.mul(cam_world_to_clip);
const mem = gctx.allocateUploadMemory(PsoStaticMesh_FrameConst, 1);
mem.cpu_slice[0] = .{
.object_to_clip = object_to_clip.transpose(),
.object_to_world = object_to_world.transpose(),
.camera_position = demo.camera.position,
.light_position = demo.light_position,
.draw_mode = demo.dxr_draw_mode,
};
gctx.setCurrentPipeline(demo.static_mesh_pso);
gctx.cmdlist.SetGraphicsRootConstantBufferView(2, mem.gpu_base);
gctx.cmdlist.SetGraphicsRootDescriptorTable(3, blk: {
const table = gctx.copyDescriptorsToGpuHeap(1, demo.vertex_buffer.view);
_ = gctx.copyDescriptorsToGpuHeap(1, demo.index_buffer.view);
break :blk table;
});
gctx.cmdlist.SetGraphicsRootDescriptorTable(
4,
gctx.copyDescriptorsToGpuHeap(1, demo.shadow_mask_texture_srv),
);
for (demo.meshes.items) |mesh| {
gctx.cmdlist.SetGraphicsRoot32BitConstants(0, 2, &.{ mesh.vertex_offset, mesh.index_offset }, 0);
gctx.cmdlist.SetGraphicsRootDescriptorTable(1, blk: {
const color_index = demo.materials.items[mesh.material_index].base_color_tex_index;
const mr_index = demo.materials.items[mesh.material_index].metallic_roughness_tex_index;
const normal_index = demo.materials.items[mesh.material_index].normal_tex_index;
const table = gctx.copyDescriptorsToGpuHeap(1, demo.textures.items[color_index].view);
_ = gctx.copyDescriptorsToGpuHeap(1, demo.textures.items[mr_index].view);
_ = gctx.copyDescriptorsToGpuHeap(1, demo.textures.items[normal_index].view);
break :blk table;
});
gctx.cmdlist.DrawInstanced(mesh.num_indices, 1, 0, 0);
}
}
demo.guir.draw(gctx);
gctx.addTransitionBarrier(back_buffer.resource_handle, d3d12.RESOURCE_STATE_PRESENT);
gctx.flushResourceBarriers();
gctx.endFrame();
}
pub fn main() !void {
common.init();
defer common.deinit();
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
var demo = try init(allocator);
defer deinit(&demo, allocator);
while (common.handleWindowEvents()) {
update(&demo);
draw(&demo);
}
} | samples/simple_raytracer/src/simple_raytracer.zig |
const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const WindowsPath = std.fs.path.WindowsPath;
const native_os = builtin.target.os.tag;
pub fn replaceExtension(allocator: Allocator, path: []const u8, new_ext: []const u8) ![]u8 {
const old_ext = std.fs.path.extension(path);
const without_ext = path[0..(@ptrToInt(old_ext.ptr) - @ptrToInt(path.ptr))];
var result: []u8 = undefined;
if (new_ext.len == 0) {
result = try allocator.alloc(u8, without_ext.len + new_ext.len);
std.mem.copy(u8, result, without_ext);
} else if (new_ext[0] == '.') {
result = try allocator.alloc(u8, without_ext.len + new_ext.len);
std.mem.copy(u8, result, without_ext);
std.mem.copy(u8, result[without_ext.len..], new_ext);
} else {
result = try allocator.alloc(u8, without_ext.len + new_ext.len + 1);
std.mem.copy(u8, result, without_ext);
result[without_ext.len] = '.';
std.mem.copy(u8, result[without_ext.len + 1 ..], new_ext);
}
return result;
}
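// A minimal usage sketch of replaceExtension; the file names below are
// hypothetical examples.
test "replaceExtension" {
    const a = std.testing.allocator;
    const with_dot = try replaceExtension(a, "foo.txt", ".zig");
    defer a.free(with_dot);
    assert(std.mem.eql(u8, with_dot, "foo.zig"));
    const without_dot = try replaceExtension(a, "foo.txt", "zig");
    defer a.free(without_dot);
    assert(std.mem.eql(u8, without_dot, "foo.zig"));
}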
pub fn toAbsolute(allocator: Allocator, path: []const u8) ![]u8 {
if (std.fs.path.isAbsolute(path)) {
return composePath(allocator, @as(*const [1][]const u8, &path), 0);
} else {
var cwd = try std.process.getCwdAlloc(allocator);
defer allocator.free(cwd);
var parts = [_][]const u8{ cwd, path };
return composePath(allocator, &parts, 0);
}
}
/// Like std.fs.path.resolve, except it doesn't convert relative paths to absolute, and
/// it doesn't resolve ".." segments to avoid incorrect behavior in the presence of links.
pub fn composePath(allocator: Allocator, paths: []const []const u8, sep: u8) ![]u8 {
if (native_os == .windows) {
return composePathWindows(allocator, paths, sep);
} else {
return composePathPosix(allocator, paths, sep);
}
}
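// A small sketch of the posix behavior described above; the segments are
// hypothetical. "." segments are dropped, while ".." segments would be kept.
test "composePathPosix" {
    const a = std.testing.allocator;
    var parts = [_][]const u8{ "a", "./b", "c" };
    const joined = try composePathPosix(a, &parts, 0);
    defer a.free(joined);
    assert(std.mem.eql(u8, joined, "a/b/c"));
}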
pub fn composePathWindows(allocator: Allocator, paths: []const []const u8, sep: u8) ![]u8 {
if (paths.len == 0) {
var result: []u8 = try allocator.alloc(u8, 1);
result[0] = '.';
return result;
}
const separator = if (sep == 0) '\\' else sep;
    // Determine which disk designator the result will have, if any.
var result_drive_buf = "_:".*;
var result_disk_designator: []const u8 = "";
var have_drive_kind = WindowsPath.Kind.None;
var have_abs_path = false;
var first_index: usize = 0;
var max_size: usize = 0;
for (paths) |p, i| {
const parsed = std.fs.path.windowsParsePath(p);
if (parsed.is_abs) {
have_abs_path = true;
first_index = i;
max_size = result_disk_designator.len;
}
switch (parsed.kind) {
WindowsPath.Kind.Drive => {
result_drive_buf[0] = std.ascii.toUpper(parsed.disk_designator[0]);
result_disk_designator = result_drive_buf[0..];
have_drive_kind = WindowsPath.Kind.Drive;
},
WindowsPath.Kind.NetworkShare => {
result_disk_designator = parsed.disk_designator;
have_drive_kind = WindowsPath.Kind.NetworkShare;
},
WindowsPath.Kind.None => {},
}
max_size += p.len + 1;
}
    // If the result will contain a disk designator, loop again to find the last
    // path that specifies it absolutely (if any), and count the maximum bytes
    // needed for the paths that relate to this disk designator.
if (have_drive_kind != WindowsPath.Kind.None) {
have_abs_path = false;
first_index = 0;
max_size = result_disk_designator.len;
var correct_disk_designator = false;
for (paths) |p, i| {
const parsed = std.fs.path.windowsParsePath(p);
if (parsed.kind != WindowsPath.Kind.None) {
if (parsed.kind == have_drive_kind) {
correct_disk_designator = compareDiskDesignators(have_drive_kind, result_disk_designator, parsed.disk_designator);
} else {
continue;
}
}
if (!correct_disk_designator) {
continue;
}
if (parsed.is_abs) {
first_index = i;
max_size = result_disk_designator.len;
have_abs_path = true;
}
max_size += p.len + 1;
}
}
    // Allocate the result and fill in the disk designator.
var result: []u8 = try allocator.alloc(u8, max_size);
errdefer allocator.free(result);
var result_index: usize = 0;
if (have_abs_path) {
switch (have_drive_kind) {
WindowsPath.Kind.Drive => {
std.mem.copy(u8, result, result_disk_designator);
result_index += result_disk_designator.len;
},
WindowsPath.Kind.NetworkShare => {
var it = std.mem.tokenize(u8, paths[first_index], "/\\");
const server_name = it.next().?;
const other_name = it.next().?;
result[result_index] = '\\';
result_index += 1;
result[result_index] = '\\';
result_index += 1;
std.mem.copy(u8, result[result_index..], server_name);
result_index += server_name.len;
result[result_index] = '\\';
result_index += 1;
std.mem.copy(u8, result[result_index..], other_name);
result_index += other_name.len;
result_disk_designator = result[0..result_index];
},
WindowsPath.Kind.None => {},
}
}
    // Now we know which disk designator to use, if any, and what kind it is,
    // and the result buffer is big enough to append all the paths to.
var correct_disk_designator = true;
for (paths[first_index..]) |p| {
const parsed = std.fs.path.windowsParsePath(p);
if (parsed.kind != WindowsPath.Kind.None) {
if (parsed.kind == have_drive_kind) {
correct_disk_designator = compareDiskDesignators(have_drive_kind, result_disk_designator, parsed.disk_designator);
} else {
continue;
}
}
if (!correct_disk_designator) {
continue;
}
var it = std.mem.tokenize(u8, p[parsed.disk_designator.len..], "/\\");
while (it.next()) |component| {
if (std.mem.eql(u8, component, ".")) {
continue;
} else {
if (have_abs_path or result_index > 0) {
result[result_index] = separator;
result_index += 1;
}
std.mem.copy(u8, result[result_index..], component);
result_index += component.len;
}
}
}
if (have_abs_path and result_index == result_disk_designator.len) {
result[0] = separator;
result_index += 1;
} else if (!have_abs_path and result_index == 0) {
result[0] = '.';
result_index += 1;
}
return allocator.shrink(result, result_index);
}
pub fn composePathPosix(allocator: Allocator, paths: []const []const u8, sep: u8) ![]u8 {
if (paths.len == 0) {
var result: []u8 = try allocator.alloc(u8, 1);
result[0] = '.';
return result;
}
const separator = if (sep == 0) '/' else sep;
var first_index: usize = 0;
var have_abs = false;
var max_size: usize = 0;
for (paths) |p, i| {
        if (std.fs.path.isAbsolutePosix(p)) {
first_index = i;
have_abs = true;
max_size = 0;
}
max_size += p.len + 1;
}
var result: []u8 = undefined;
var result_index: usize = 0;
result = try allocator.alloc(u8, max_size);
errdefer allocator.free(result);
for (paths[first_index..]) |p| {
var it = std.mem.tokenize(u8, p, "/");
while (it.next()) |component| {
if (std.mem.eql(u8, component, ".")) {
continue;
} else {
if (have_abs or result_index > 0) {
result[result_index] = separator;
result_index += 1;
}
std.mem.copy(u8, result[result_index..], component);
result_index += component.len;
}
}
}
if (result_index == 0) {
if (have_abs) {
result[0] = separator;
} else {
result[0] = '.';
}
result_index += 1;
}
return allocator.shrink(result, result_index);
}
/// If 'path' is a subpath of 'ancestor', returns the subpath portion.
/// If 'path' and 'ancestor' are the same, returns `.`.
/// Otherwise, returns 'path'.
/// Any `.` segments in either path are ignored (but not `..` segments).
/// On windows, `/` and `\` can be used interchangeably.
/// Note this is only a lexical operation; it doesn't depend on the paths
/// existing or change based on the current working directory.
pub fn pathRelativeToAncestor(path: []const u8, ancestor: []const u8) []const u8 {
if (native_os == .windows) {
return pathRelativeToAncestorWindows(path, ancestor);
} else {
return pathRelativeToAncestorPosix(path, ancestor);
}
}
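// A lexical sketch of the behavior documented above; the paths are hypothetical.
test "pathRelativeToAncestorPosix" {
    assert(std.mem.eql(u8, pathRelativeToAncestorPosix("/a/b/c", "/a"), "b/c"));
    assert(std.mem.eql(u8, pathRelativeToAncestorPosix("/a/b", "/a/b"), "."));
    assert(std.mem.eql(u8, pathRelativeToAncestorPosix("/x/y", "/a"), "/x/y"));
}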
pub inline fn pathRelativeToAncestorWindows(path: []const u8, ancestor: []const u8) []const u8 {
return pathRelativeToAncestorGeneric(path, ancestor, "/\\");
}
pub inline fn pathRelativeToAncestorPosix(path: []const u8, ancestor: []const u8) []const u8 {
return pathRelativeToAncestorGeneric(path, ancestor, "/");
}
fn pathRelativeToAncestorGeneric(path: []const u8, ancestor: []const u8, comptime tokens: []const u8) []const u8 {
var path_it = std.mem.tokenize(u8, path, tokens);
var ancestor_it = std.mem.tokenize(u8, ancestor, tokens);
if (prefixMatches(&path_it, &ancestor_it)) {
var start = path_it.next() orelse return ".";
while (std.mem.eql(u8, start, ".")) {
start = path_it.next() orelse return ".";
}
return path[(@ptrToInt(start.ptr) - @ptrToInt(path.ptr))..];
} else {
return path;
}
}
fn prefixMatches(path_it: *std.mem.TokenIterator(u8), ancestor_it: *std.mem.TokenIterator(u8)) bool {
while (true) {
var ancestor_part = ancestor_it.next() orelse return true;
while (std.mem.eql(u8, ancestor_part, ".")) {
ancestor_part = ancestor_it.next() orelse return true;
}
var path_part = path_it.next() orelse return false;
while (std.mem.eql(u8, path_part, ".")) {
path_part = path_it.next() orelse return false;
}
if (!std.mem.eql(u8, path_part, ancestor_part)) {
return false;
}
}
}
fn compareDiskDesignators(kind: WindowsPath.Kind, p1: []const u8, p2: []const u8) bool {
switch (kind) {
WindowsPath.Kind.None => {
assert(p1.len == 0);
assert(p2.len == 0);
return true;
},
WindowsPath.Kind.Drive => {
return std.ascii.toUpper(p1[0]) == std.ascii.toUpper(p2[0]);
},
WindowsPath.Kind.NetworkShare => {
const sep1 = p1[0];
const sep2 = p2[0];
var it1 = std.mem.tokenize(u8, p1, &[_]u8{sep1});
var it2 = std.mem.tokenize(u8, p2, &[_]u8{sep2});
// TODO ASCII is wrong, we actually need full unicode support to compare paths.
return std.ascii.eqlIgnoreCase(it1.next().?, it2.next().?) and std.ascii.eqlIgnoreCase(it1.next().?, it2.next().?);
},
}
}
const CopyTreeError = error{SystemResources} || std.os.CopyFileRangeError || std.os.SendFileError || std.os.RenameError || std.os.OpenError;
pub fn copyTree(source_dir: std.fs.Dir, source_path: []const u8, dest_dir: std.fs.Dir, dest_path: []const u8, options: std.fs.CopyFileOptions) CopyTreeError!void {
// TODO figure out how to handle symlinks better
source_dir.copyFile(source_path, dest_dir, dest_path, options) catch |err| switch (err) {
error.IsDir => {
var src = try source_dir.openDir(source_path, .{ .iterate = true, .no_follow = true });
defer src.close();
var dest = try dest_dir.makeOpenPath(dest_path, .{ .no_follow = true });
defer dest.close();
try copyDir(src, dest, options);
},
else => return err,
};
}
fn copyDir(source_dir: std.fs.Dir, dest_dir: std.fs.Dir, options: std.fs.CopyFileOptions) CopyTreeError!void {
var iter = source_dir.iterate();
while (try iter.next()) |entry| {
if (entry.kind == std.fs.File.Kind.Directory) {
var src = try source_dir.openDir(entry.name, .{ .iterate = true, .no_follow = true });
defer src.close();
var dest = try dest_dir.makeOpenPath(entry.name, .{ .no_follow = true });
defer dest.close();
try copyDir(src, dest, options);
} else {
try copyTree(source_dir, entry.name, dest_dir, entry.name, options);
}
}
}
pub fn errorName(err: anyerror) [:0]const u8 {
return switch (err) {
error.AccessDenied, error.PermissionDenied => "Access denied",
error.BadPathName, error.InvalidUtf8 => "Invalid path",
error.DeviceBusy => "Device busy",
error.FileBusy => "File busy",
error.PipeBusy => "Pipe busy",
error.FileNotFound => "File not found",
error.FileTooBig => "File too big",
error.InputOutput => "I/O error",
error.IsDir => "Path is a directory",
error.NotDir => "Path is not a directory",
error.NameTooLong => "Path too long",
error.NoDevice => "Device not found",
error.NoSpaceLeft => "Insufficient space remaining",
error.PathAlreadyExists => "Path already exists",
error.ProcessFdQuotaExceeded => "No more process file descriptors",
error.SystemFdQuotaExceeded => "No more system file descriptors",
error.SharingViolation => "Sharing violation",
error.SymLinkLoop => "Symlink loop",
error.OutOfMemory => "Out of memory",
else => "Unexpected filesystem error",
};
} | limp/fs.zig |
const std = @import("std");
const fs = std.fs;
const io = std.io;
const info = std.log.info;
const print = std.debug.print;
const fmt = std.fmt;
const ArrayList = std.ArrayList;
const assert = std.debug.assert;
const utils = @import("utils.zig");
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const Node = struct {
allo: *std.mem.Allocator,
value: usize,
next: *Node,
pub fn populate(self: *Node, allo: *std.mem.Allocator, value: usize) void {
self.allo = allo;
self.value = value;
self.next = allo.create(Node) catch unreachable;
}
};
fn print_ring(node: *Node) void {
var next = node;
while (true) {
print("{}", .{next.value});
next = next.next;
if (next.value == node.value) break;
}
print("\n", .{});
}
fn slice_contains(node: *Node, value: usize, cnt: usize) bool {
var next = node;
var i: usize = 0;
while (i < cnt) : (i += 1) {
if (next.value == value) return true;
next = next.next;
if (next.value == node.value) return false;
}
return false;
}
fn cut_right(node: *Node, cnt: usize) *Node {
if (cnt == 0) {
return node;
} else {
const new_next = cut_right(node.next, cnt - 1);
const cut_slice = node.next;
node.next = new_next;
return cut_slice;
}
}
fn right_nth(node: *Node, nth: usize) *Node {
var next = node;
var i: usize = 0;
while (true) : (i += 1) {
if (i == nth) return next;
next = next.next;
}
}
fn splice_right(node: *Node, new: *Node, cnt: usize) void {
// store continuation to ring
const old_next = node.next;
// insert new slice
node.next = new;
    // find the slice's last node and link it back into the ring
var slice_end = right_nth(new, cnt - 1);
slice_end.next = old_next;
}
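// Worked example (hypothetical ring): given 3 -> 8 -> 9 -> 1 -> (back to 3) and a
// detached slice 2 -> 5 -> 4, splice_right(node3, node2, 3) yields
// 3 -> 2 -> 5 -> 4 -> 8 -> 9 -> 1 -> (back to 3).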
fn map_nodes(node: *Node, output: []*Node) void {
var next = node;
while (true) {
info("> {}", .{next.value});
output[next.value] = next;
next = next.next;
if (next.value == node.value) break;
}
}
fn destroy_ring(allo: *std.mem.Allocator, node: *Node) void {
var next = node;
var i: usize = 0;
while (true) : (i += 1) {
const prev = next;
next = next.next;
defer allo.destroy(prev);
if (next.value == node.value) break;
}
}
fn run_game(allo: *std.mem.Allocator, node: *Node, rounds: usize, largest_value: usize) *Node {
// helper for finding nodes quickly
var node_map: []*Node = allo.alloc(*Node, largest_value + 1) catch unreachable;
map_nodes(node, node_map);
var first_node = node;
var i: usize = 0;
while (i < rounds) : (i += 1) {
// cut a slice and patch the remaining nodes
const cur_val = first_node.value;
const cut = first_node.next;
const new_next = right_nth(first_node, 1 + 3);
first_node.next = new_next;
// find target value
var target: usize = if (cur_val == 1) largest_value else cur_val - 1;
while (slice_contains(cut, target, 3)) {
target = if (target == 1) largest_value else target - 1;
}
        // The target may be behind us, so look it up in O(1) via the node map.
var target_node = node_map[target];
if (target_node.value != target) unreachable;
// splice cut into ring
splice_right(target_node, cut, 3);
// choose a new "first" node
first_node = first_node.next;
}
const one_node = node_map[1];
allo.free(node_map);
return one_node;
}
pub fn main() !void {
const begin = @divTrunc(std.time.nanoTimestamp(), 1000);
// setup
//
defer _ = gpa.deinit();
var allo = &gpa.allocator;
var lines: std.mem.TokenIterator = try utils.readInputLines(allo, "./input1");
defer allo.free(lines.buffer);
var p1: usize = 0;
var p2: usize = 0;
// setup done
var largest_value: usize = 0;
var first_node_p1: *Node = try allo.create(Node);
var last_node_p1: *Node = first_node_p1;
var first_node_p2: *Node = try allo.create(Node);
var last_node_p2: *Node = first_node_p2;
const line = lines.next() orelse unreachable;
for (line) |char, i| {
// create new node from input
const char_val = try fmt.parseUnsigned(usize, &[_]u8{char}, 10);
if (char_val > largest_value) largest_value = char_val;
last_node_p1.populate(allo, char_val);
last_node_p2.populate(allo, char_val);
// do not move to next node on last char
if (i < line.len - 1) {
last_node_p1 = last_node_p1.next;
last_node_p2 = last_node_p2.next;
}
}
    // p1: free the dangling pre-allocated node, then close the ring.
    allo.destroy(last_node_p1.next);
    last_node_p1.next = first_node_p1;
    // p2: do not close the ring yet; step onto the dangling node, which the
    // extension loop below populates with values up to one million.
    last_node_p2 = last_node_p2.next;
// p1
const first_game = run_game(allo, first_node_p1, 100, largest_value);
print("p1 order (drop the 1): ", .{});
print_ring(first_game);
// p2
var i = largest_value + 1;
while (i <= 1000000) : (i += 1) {
last_node_p2.populate(allo, i);
if (i < 1000000) {
last_node_p2 = last_node_p2.next;
}
}
allo.destroy(last_node_p2.next);
last_node_p2.next = first_node_p2;
const second_game = run_game(allo, first_node_p2, 10000000, 1000000);
p2 = second_game.next.value * second_game.next.next.value;
print("p2: {}\n", .{p2});
// destroy memory
destroy_ring(allo, first_node_p1);
info("freeing p2 nodes... this takes a long while", .{});
destroy_ring(allo, last_node_p2);
// end
const delta = @divTrunc(std.time.nanoTimestamp(), 1000) - begin;
print("all done in {} microseconds\n", .{delta});
} | day_23/src/main.zig |
const std = @import("std");
const mem = std.mem;
const UnifiedIdeograph = @This();
allocator: *mem.Allocator,
array: []bool,
lo: u21 = 13312,
hi: u21 = 201546,
pub fn init(allocator: *mem.Allocator) !UnifiedIdeograph {
var instance = UnifiedIdeograph{
.allocator = allocator,
.array = try allocator.alloc(bool, 188235),
};
mem.set(bool, instance.array, false);
var index: u21 = 0;
index = 0;
while (index <= 6591) : (index += 1) {
instance.array[index] = true;
}
index = 6656;
while (index <= 27644) : (index += 1) {
instance.array[index] = true;
}
index = 50702;
while (index <= 50703) : (index += 1) {
instance.array[index] = true;
}
instance.array[50705] = true;
index = 50707;
while (index <= 50708) : (index += 1) {
instance.array[index] = true;
}
instance.array[50719] = true;
instance.array[50721] = true;
index = 50723;
while (index <= 50724) : (index += 1) {
instance.array[index] = true;
}
index = 50727;
while (index <= 50729) : (index += 1) {
instance.array[index] = true;
}
index = 117760;
while (index <= 160477) : (index += 1) {
instance.array[index] = true;
}
index = 160512;
while (index <= 164660) : (index += 1) {
instance.array[index] = true;
}
index = 164672;
while (index <= 164893) : (index += 1) {
instance.array[index] = true;
}
index = 164896;
while (index <= 170657) : (index += 1) {
instance.array[index] = true;
}
index = 170672;
while (index <= 178144) : (index += 1) {
instance.array[index] = true;
}
index = 183296;
while (index <= 188234) : (index += 1) {
instance.array[index] = true;
}
// Placeholder: 0. Struct name, 1. Code point kind
return instance;
}
pub fn deinit(self: *UnifiedIdeograph) void {
self.allocator.free(self.array);
}
// isUnifiedIdeograph checks if cp is of the kind Unified_Ideograph.
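// Example (hypothetical): isUnifiedIdeograph(0x4E2D) returns true for U+4E2D '中';
// any code point below 0x3400 (== lo) returns false.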
pub fn isUnifiedIdeograph(self: UnifiedIdeograph, cp: u21) bool {
if (cp < self.lo or cp > self.hi) return false;
const index = cp - self.lo;
return if (index >= self.array.len) false else self.array[index];
} | src/components/autogen/PropList/UnifiedIdeograph.zig |
const std = @import("std");
const testing = std.testing;
const allocator = std.heap.page_allocator;
pub const Adapter = struct {
top: usize,
sorted: bool,
ratings: std.ArrayList(usize),
pub fn init() Adapter {
var self = Adapter{
.ratings = std.ArrayList(usize).init(allocator),
.top = 0,
.sorted = true,
};
return self;
}
pub fn deinit(self: *Adapter) void {
self.ratings.deinit();
}
pub fn add_rating(self: *Adapter, line: []const u8) void {
const number = std.fmt.parseInt(usize, line, 10) catch unreachable;
self.ratings.append(number) catch unreachable;
if (self.top < number) self.top = number;
self.sorted = false;
}
pub fn get_one_by_three(self: *Adapter) usize {
self.check_and_sort();
var counts = [_]usize{0} ** 4;
var previous: usize = 0;
var p: usize = 0;
while (p < self.ratings.items.len) : (p += 1) {
const current = self.ratings.items[p];
const delta = current - previous;
if (delta > 3) {
@panic("Too big");
}
counts[delta] += 1;
previous = current;
}
return counts[1] * counts[3];
}
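    // Counts the distinct valid adapter chains with a bottom-up DP over the
    // sorted ratings: ways[r] = ways[r-1] + ways[r-2] + ways[r-3], where a
    // missing rating contributes 0 and ways[0] = 1. E.g. for hypothetical
    // ratings {1, 2, 3}: ways[1] = 1, ways[2] = 2, ways[3] = 4.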
pub fn count_valid(self: *Adapter) usize {
self.check_and_sort();
var ways = std.AutoHashMap(usize, usize).init(allocator);
defer ways.deinit();
var p: usize = 0;
_ = ways.put(0, 1) catch unreachable;
while (p < self.ratings.items.len) : (p += 1) {
var rating = self.ratings.items[p];
var count: usize = 0;
var pos: usize = 1;
while (pos <= 3) : (pos += 1) {
if (rating < pos) continue; // too close to the beginning
const needed = rating - pos;
if (!ways.contains(needed)) continue; // don't have this adapter
count += ways.get(needed).?; // this adapter contributes these many ways
}
_ = ways.put(rating, count) catch unreachable;
// std.debug.warn("WAYS {} = {}\n", .{ rating, count });
}
return ways.get(self.top).?;
}
fn check_and_sort(self: *Adapter) void {
if (self.sorted) return;
self.sorted = true;
self.top += 3;
self.ratings.append(self.top) catch unreachable;
std.sort.sort(usize, self.ratings.items, {}, comptime std.sort.asc(usize));
}
};
test "sample small" {
const data: []const u8 =
\\16
\\10
\\15
\\5
\\1
\\11
\\7
\\19
\\6
\\12
\\4
;
var adapter = Adapter.init();
defer adapter.deinit();
var it = std.mem.split(u8, data, "\n");
while (it.next()) |line| {
adapter.add_rating(line);
}
const one_by_three = adapter.get_one_by_three();
try testing.expect(one_by_three == 35);
const valid = adapter.count_valid();
try testing.expect(valid == 8);
}
test "sample large" {
const data: []const u8 =
\\28
\\33
\\18
\\42
\\31
\\14
\\46
\\20
\\48
\\47
\\24
\\23
\\49
\\45
\\19
\\38
\\39
\\11
\\1
\\32
\\25
\\35
\\8
\\17
\\7
\\9
\\4
\\2
\\34
\\10
\\3
;
var adapter = Adapter.init();
defer adapter.deinit();
var it = std.mem.split(u8, data, "\n");
while (it.next()) |line| {
adapter.add_rating(line);
}
const one_by_three = adapter.get_one_by_three();
try testing.expect(one_by_three == 220);
const valid = adapter.count_valid();
try testing.expect(valid == 19208);
} | 2020/p10/adapter.zig |
const std = @import("std");
const debug = std.debug;
const heap = std.heap;
const LinkedList = std.LinkedList;
const mem = std.mem;
const Node = LinkedList(u32).Node;
const debug_logging: bool = false;
pub fn main() void {
var allocator = &std.heap.DirectAllocator.init().allocator;
debug.warn("09-1: {}\n", computeHighScore(allocator, 465, 71498));
debug.warn("09-2: {}\n", computeHighScore(allocator, 465, 71498 * 100));
}
fn printTurn(player_turn: ?u32, circle: LinkedList(u32), current_num: u32) void {
if (player_turn) |t| {
logDebug("[{}] ", t);
} else {
logDebug("[-] ");
}
var it = circle.first;
var i: usize = 0;
while (it) |node| : ({ it = node.next; i += 1; }) {
if (i >= circle.len) {
break;
}
if (node.data == current_num) {
logDebug("({}) ", node.data);
} else {
logDebug("{} ", node.data);
}
}
logDebug("\n");
}
fn computeHighScore(allocator: *mem.Allocator, num_players: u32, num_marbles: u32) !u32 {
var scores = try allocator.alloc(u32, num_players);
defer allocator.free(scores);
for (scores) |*s| {
s.* = 0;
}
const buf = try allocator.alloc(u8, num_marbles * @sizeOf(Node));
defer allocator.free(buf);
// TODO: Why does this explode my memory usage!?
//const node_allocator = allocator;
const node_allocator = &heap.FixedBufferAllocator.init(buf[0..]).allocator;
var circle = LinkedList(u32).init();
var initial_marble = try circle.createNode(0, node_allocator);
defer circle.destroyNode(initial_marble, node_allocator);
circle.first = initial_marble;
circle.last = circle.first;
circle.first.?.next = circle.first;
circle.first.?.prev = circle.first;
circle.len = 1;
var current: *Node = circle.first orelse unreachable;
var last_played: u32 = 0;
var turn: u32 = 1;
while (last_played < num_marbles) : (last_played += 1) {
var to_be_played = last_played + 1;
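        // Scoring rule: every 23rd marble is kept by the current player, who
        // also removes and scores the marble seven positions counter-clockwise.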
if (to_be_played % 23 == 0) {
var to_remove = current.prev.?.prev.?.prev.?.prev.?.prev.?.prev.?.prev orelse unreachable;
defer circle.destroyNode(to_remove, node_allocator);
var to_make_current = to_remove.next orelse unreachable;
circle.remove(to_remove);
current = to_make_current;
scores[turn] += (to_be_played + to_remove.data);
} else {
var new_marble = try circle.createNode(to_be_played, node_allocator);
var two_clockwise_from_current = current.next.?.next orelse unreachable;
circle.insertBefore(two_clockwise_from_current, new_marble);
current = new_marble;
}
turn += 1;
turn %= num_players;
}
var high_score: u32 = 0;
for (scores) |s| {
if (s > high_score) {
high_score = s;
}
}
logDebug("High Score: {}\n", high_score);
return high_score;
}
test "compute high score" {
var allocator = &std.heap.DirectAllocator.init().allocator;
debug.warn("\n");
debug.assert(32 == try computeHighScore(allocator, 9, 25));
debug.assert(8317 == try computeHighScore(allocator, 10, 1618));
debug.assert(146373 == try computeHighScore(allocator, 13, 7999));
debug.assert(2764 == try computeHighScore(allocator, 17, 1104));
debug.assert(54718 == try computeHighScore(allocator, 21, 6111));
debug.assert(37305 == try computeHighScore(allocator, 30, 5807));
}
fn logDebug(comptime format_str: []const u8, args: ...) void {
if (debug_logging) {
debug.warn(format_str, args);
}
} | 2018/day_09.zig |
const url = @import("./url.zig");
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const debug = std.debug;
const EscapeTest = struct {
in: []const u8,
out: []const u8,
err: ?url.Error,
};
fn unescapePassingTests() []const EscapeTest {
const ts = []EscapeTest{
EscapeTest{
.in = "",
.out = "",
.err = null,
},
EscapeTest{
.in = "1%41",
.out = "1A",
.err = null,
},
EscapeTest{
.in = "1%41%42%43",
.out = "1ABC",
.err = null,
},
EscapeTest{
.in = "%4a",
.out = "J",
.err = null,
},
EscapeTest{
.in = "%6F",
.out = "o",
.err = null,
},
EscapeTest{
.in = "a+b",
.out = "a b",
.err = null,
},
EscapeTest{
.in = "a%20b",
.out = "a b",
.err = null,
},
};
return ts[0..];
}
fn unescapeFailingTests() []const EscapeTest {
const ts = []EscapeTest{
EscapeTest{
.in = "%",
.out = "",
.err = url.Error.EscapeError,
},
EscapeTest{
.in = "%a",
.out = "",
.err = url.Error.EscapeError,
},
EscapeTest{
.in = "%1",
.out = "",
.err = url.Error.EscapeError,
},
EscapeTest{
.in = "123%45%6",
.out = "",
.err = url.Error.EscapeError,
},
EscapeTest{
.in = "%zzzzz",
.out = "",
.err = url.Error.EscapeError,
},
};
return ts[0..];
}
test "QueryUnEscape" {
var buffer = try std.Buffer.init(debug.global_allocator, "");
var buf = &buffer;
defer buf.deinit();
for (unescapePassingTests()) |ts| {
try url.queryUnEscape(buf, ts.in);
assert(buf.eql(ts.out));
buf.shrink(0);
}
for (unescapeFailingTests()) |ts| {
if (url.queryUnEscape(buf, ts.in)) {
@panic("expected an error");
} else |err| {
assert(err == ts.err.?);
}
buf.shrink(0);
}
}
fn queryEscapeTests() []const EscapeTest {
const ts = []EscapeTest{
EscapeTest{
.in = "",
.out = "",
.err = null,
},
EscapeTest{
.in = "abc",
.out = "abc",
.err = null,
},
EscapeTest{
.in = "one two",
.out = "one+two",
.err = null,
},
EscapeTest{
.in = "10%",
.out = "10%25",
.err = null,
},
EscapeTest{
.in = " ?&=#+%!<>#\"{}|\\^[]`☺\t:/@$'()*,;",
.out = "+%3F%26%3D%23%2B%25%21%3C%3E%23%22%7B%7D%7C%5C%5E%5B%5D%60%E2%98%BA%09%3A%2F%40%24%27%28%29%2A%2C%3B",
.err = null,
},
};
return ts[0..];
}
test "QueryEscape" {
var buffer = try std.Buffer.init(debug.global_allocator, "");
var buf = &buffer;
defer buf.deinit();
for (queryEscapeTests()) |ts| {
try url.queryEscape(buf, ts.in);
assert(buf.eql(ts.out));
buf.shrink(0);
}
}
fn pathEscapeTests() []const EscapeTest {
const ts = []EscapeTest{
EscapeTest{
.in = "",
.out = "",
.err = null,
},
EscapeTest{
.in = "abc",
.out = "abc",
.err = null,
},
EscapeTest{
.in = "abc+def",
.out = "abc+def",
.err = null,
},
EscapeTest{
.in = "one two",
.out = "one%20two",
.err = null,
},
EscapeTest{
.in = "10%",
.out = "10%25",
.err = null,
},
EscapeTest{
.in = " ?&=#+%!<>#\"{}|\\^[]`☺\t:/@$'()*,;",
.out = "%20%3F&=%23+%25%21%3C%3E%23%22%7B%7D%7C%5C%5E%5B%5D%60%E2%98%BA%09:%2F@$%27%28%29%2A%2C%3B",
.err = null,
},
};
return ts[0..];
}
test "PathEscape" {
var buffer = try std.Buffer.init(debug.global_allocator, "");
var buf = &buffer;
defer buf.deinit();
for (pathEscapeTests()) |ts| {
try url.pathEscape(buf, ts.in);
assert(buf.eql(ts.out));
buf.shrink(0);
}
}
test "URL" {
var u = url.URL.init(debug.global_allocator);
defer u.deinit();
} | src/net/url/url_test.zig |
const types = @import("types.zig");
const jmap_error = struct {
    const request_level = struct {
const ProblemDetails = struct {
/// A URI reference [RFC3986] that identifies the problem type.
type: []const u8,
/// A short, human-readable summary of the problem type.
title: []const u8,
/// The HTTP status code [RFC7231, 6] generated by the origin server for
/// this occurrence of the problem.
status: types.UnsignedInt,
/// A human-readable explanation specific to this occurrence of the problem.
detail: []u8,
/// A URI reference that identifies the specific occurrence of the problem.
instance: []u8,
};
const ErrorTemplate = struct {
uri: []const u8,
desc: []const u8,
status: types.UnsignedInt,
};
// TODO verify status codes of the following errors
const unknown_capability = ErrorTemplate{
.uri = "urn:ietf:params:jmap:error:unknownCapability",
.desc = "The client included a capability in the \"using\" " ++
"property of the request that the server does not support.",
.status = 400,
};
const not_json = ErrorTemplate{
.uri = "urn:ietf:params:jmap:error:notJSON",
.desc = "The content type of the request was not \"application/json\" or the " ++
"request did not parse as I-JSON.",
.status = 400,
};
const not_request = ErrorTemplate{
.uri = "urn:ietf:params:jmap:error:notRequest",
.desc = "The request parsed as JSON but did not match the type signature of " ++
"the Request object.",
.status = 400,
};
// TODO add extra property to the error
const limit = ErrorTemplate{
.uri = "urn:ietf:params:jmap:error:limit",
.desc = "The request was not processed as it would have exceeded one of the " ++
"request limits defined on the capability object, such as " ++
"maxSizeRequest, maxCallsInRequest, or maxConcurrentRequests.",
.status = 400,
};
fn createError(template: ErrorTemplate, detail: []u8, instance: []u8) ProblemDetails {
return ProblemDetails{
.type = template.uri,
.title = template.desc,
.status = template.status,
.detail = detail,
.instance = instance,
};
}
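        // Usage sketch (hypothetical detail/instance buffers):
        //   const problem = createError(not_json, detail_buf, instance_buf);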
    };
    const method_level = struct {
const ErrorTemplate = struct {
name: []const u8,
desc: []const u8,
};
const server_unavailable = ErrorTemplate{
.name = "serverUnavailable",
.desc = "Some internal server resource was temporarily unavailable.",
};
const server_fail = ErrorTemplate{
.name = "serverFail",
.desc = "An unexpected or unknown error occurred during the processing of the " ++
"call. The method call made no changes to the server's state.",
};
const server_partial_fail = ErrorTemplate{
.name = "serverPartialFail",
.desc = "Some, but not all, expected changes described by the method occurred. The " ++
"client MUST resynchronise impacted data to determine server state.",
};
const unknown_method = ErrorTemplate{
.name = "unknownMethod",
.desc = "The server does not recognise this method name.",
};
const invalid_arguments = ErrorTemplate{
.name = "invalidArguments",
.desc = "One of the arguments is of the wrong type or is otherwise invalid, or " ++
"a required argument is missing.",
};
const invalid_result_reference = ErrorTemplate{
.name = "invalidResultReference",
.desc = "The method used a result reference for one of its arguments, but this " ++
"failed to resolve.",
};
const forbidden = ErrorTemplate{
.name = "forbidden",
.desc = "The method and arguments are valid, but executing the method would " ++
"violate an Access Control List (ACL) or other permissions policy.",
};
const account_not_found = ErrorTemplate{
.name = "accountNotFound",
.desc = "The accountId does not correspond to a valid account.",
};
const account_not_supported_by_method = ErrorTemplate{
.name = "accountNotSupportedByMethod",
.desc = "The accountId given corresponds to a valid account, but the account does " ++
"not support this method or data type.",
};
const account_read_only = ErrorTemplate{
.name = "accountReadOnly",
.desc = "This method modifies state, but the account is read-only.",
};
    };
}; | error.zig |
const common = @import("common.zig");
pub const InitializeParams = struct {
pub const method = "initialize";
pub const kind = common.PacketKind.request;
capabilities: ClientCapabilities,
workspaceFolders: ?[]const common.WorkspaceFolder,
};
pub const RegularExpressEngineVersion = union(enum) {
none: void,
string: []const u8,
};
/// Client capabilities specific to regular expressions.
///
/// [Docs](https://microsoft.github.io/language-server-protocol/specifications/specification-3-17/#regExp)
pub const RegularExpressionsClientCapabilities = struct {
/// The engine's name.
engine: []const u8,
/// The engine's version.
version: RegularExpressEngineVersion,
};
/// [Docs](https://microsoft.github.io/language-server-protocol/specifications/specification-3-17/#semanticTokensClientCapabilities)
pub const SemanticTokensClientCapabilities = struct {
dynamicRegistration: bool = false,
/// The token types that the client supports.
tokenTypes: []const []const u8,
/// The token modifiers that the client supports.
tokenModifiers: []const []const u8,
    /// The formats the client supports.
formats: []const []const u8,
/// Whether the client supports tokens that can overlap each other.
overlappingTokenSupport: bool = false,
/// Whether the client supports tokens that can span multiple lines.
multilineTokenSupport: bool = false,
};
pub const ShowMessageRequestClientCapabilities = struct {
/// Capabilities specific to the `MessageActionItem` type.
messageActionItem: ?struct {
additionalPropertiesSupport: ?bool = null,
} = null,
};
/// Capabilities that existed in the 2.x version of the protocol are still mandatory for clients.
/// Clients cannot opt out of providing them. So even if a client omits the ClientCapabilities.textDocument.synchronization
/// it is still required that the client provides text document synchronization (e.g. open, changed and close notifications).
///
/// [Docs](https://microsoft.github.io/language-server-protocol/specifications/specification-3-17/#clientCapabilities)
pub const ClientCapabilities = struct {
general: ?struct {
regularExpressions: ?RegularExpressionsClientCapabilities = null,
} = null,
window: ?struct {
showMessage: ?ShowMessageRequestClientCapabilities = null,
} = null,
workspace: ?struct {
workspaceFolders: bool = false,
} = null,
textDocument: ?struct {
semanticTokens: ?SemanticTokensClientCapabilities = null,
hover: ?struct {
contentFormat: []const []const u8 = &.{},
},
completion: ?struct {
completionItem: ?struct {
snippetSupport: bool = false,
documentationFormat: []const []const u8 = &.{},
},
},
} = null,
/// **LSP extension**
///
/// [Docs](https://clangd.llvm.org/extensions.html#utf-8-offsets)
offsetEncoding: []const []const u8 = &.{},
pub const Feature = enum {
window,
show_message,
show_message_action_item,
show_message_additional_properties,
};
/// `ClientCapabilities` is **thicc** so this helps make it simpler :) <3
pub fn isSupported(self: ClientCapabilities, feature: Feature) bool {
return switch (feature) {
.window => self.window != null,
.show_message => self.isSupported(.window) and self.window.?.showMessage != null,
.show_message_action_item => self.isSupported(.show_message) and self.window.?.showMessage.?.messageActionItem != null,
.show_message_additional_properties => self.isSupported(.show_message_action_item) and if (self.window.?.showMessage.?.messageActionItem.?.additionalPropertiesSupport) |v| v else false,
};
}
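    // Usage sketch (hypothetical `caps` value):
    //   if (caps.isSupported(.show_message)) { /* safe to send window/showMessage */ }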
};
/// [Docs](https://microsoft.github.io/language-server-protocol/specifications/specification-3-17/#initializeResult)
pub const InitializeResult = struct {
offsetEncoding: []const u8,
capabilities: struct {
signatureHelpProvider: struct {
triggerCharacters: []const []const u8,
retriggerCharacters: []const []const u8,
},
textDocumentSync: enum(u32) {
none = 0,
full = 1,
incremental = 2,
usingnamespace common.EnumStringify(@This());
},
renameProvider: bool,
completionProvider: struct {
resolveProvider: bool,
triggerCharacters: []const []const u8,
},
documentHighlightProvider: bool,
hoverProvider: bool,
codeActionProvider: bool,
declarationProvider: bool,
definitionProvider: bool,
typeDefinitionProvider: bool,
implementationProvider: bool,
referencesProvider: bool,
documentSymbolProvider: bool,
colorProvider: bool,
documentFormattingProvider: bool,
documentRangeFormattingProvider: bool,
foldingRangeProvider: bool,
selectionRangeProvider: bool,
workspaceSymbolProvider: bool,
rangeProvider: bool,
documentProvider: bool,
workspace: ?struct {
workspaceFolders: ?struct {
supported: bool,
changeNotifications: bool,
},
},
semanticTokensProvider: ?struct {
full: bool,
range: bool,
legend: struct {
tokenTypes: []const []const u8,
tokenModifiers: []const []const u8,
},
} = null,
},
serverInfo: struct {
name: []const u8,
version: ?[]const u8 = null,
},
};
pub const InitializedParams = struct {
pub const method = "initialized";
pub const kind = common.PacketKind.notification;
}; | src/types/general.zig |
const AstGen = @This();
const std = @import("std");
const ast = std.zig.ast;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const zir = @import("zir.zig");
const Module = @import("Module.zig");
const trace = @import("tracy.zig").trace;
const Scope = Module.Scope;
const GenZir = Scope.GenZir;
const InnerError = Module.InnerError;
const Decl = Module.Decl;
const LazySrcLoc = Module.LazySrcLoc;
const BuiltinFn = @import("BuiltinFn.zig");
instructions: std.MultiArrayList(zir.Inst) = .{},
string_bytes: ArrayListUnmanaged(u8) = .{},
extra: ArrayListUnmanaged(u32) = .{},
decl_map: std.StringArrayHashMapUnmanaged(void) = .{},
decls: ArrayListUnmanaged(*Decl) = .{},
/// The end of special indexes. `zir.Inst.Ref` subtracts against this number to convert
/// to `zir.Inst.Index`. The default here is correct if there are 0 parameters.
ref_start_index: u32 = zir.Inst.Ref.typed_value_map.len,
mod: *Module,
decl: *Decl,
arena: *Allocator,
/// Call `deinit` on the result.
pub fn init(mod: *Module, decl: *Decl, arena: *Allocator) !AstGen {
var astgen: AstGen = .{
.mod = mod,
.decl = decl,
.arena = arena,
};
// Must be a block instruction at index 0 with the root body.
try astgen.instructions.append(mod.gpa, .{
.tag = .block,
.data = .{ .pl_node = .{
.src_node = 0,
.payload_index = undefined,
} },
});
return astgen;
}
pub fn addExtra(astgen: *AstGen, extra: anytype) Allocator.Error!u32 {
const fields = std.meta.fields(@TypeOf(extra));
try astgen.extra.ensureCapacity(astgen.mod.gpa, astgen.extra.items.len + fields.len);
return addExtraAssumeCapacity(astgen, extra);
}
pub fn addExtraAssumeCapacity(astgen: *AstGen, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
const result = @intCast(u32, astgen.extra.items.len);
inline for (fields) |field| {
astgen.extra.appendAssumeCapacity(switch (field.field_type) {
u32 => @field(extra, field.name),
zir.Inst.Ref => @enumToInt(@field(extra, field.name)),
else => @compileError("bad field type"),
});
}
return result;
}
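// Hypothetical sketch (not part of the original file) of how `addExtra*`
// flattens a typed payload: each u32/Ref field is appended in declaration
// order, and the payload's starting index in `extra` is returned. `zir.Inst.Bin`
// is the same two-Ref payload used elsewhere in this file.
test "addExtraAssumeCapacity flattens fields in declaration order" {
    var astgen: AstGen = .{ .mod = undefined, .decl = undefined, .arena = undefined };
    defer astgen.extra.deinit(std.testing.allocator);
    try astgen.extra.ensureCapacity(std.testing.allocator, 2);
    const payload_index = addExtraAssumeCapacity(&astgen, zir.Inst.Bin{
        .lhs = .void_value,
        .rhs = .none,
    });
    assert(payload_index == 0);
    assert(astgen.extra.items.len == 2);
    assert(astgen.extra.items[0] == @enumToInt(zir.Inst.Ref.void_value));
    assert(astgen.extra.items[1] == @enumToInt(zir.Inst.Ref.none));
}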
pub fn appendRefs(astgen: *AstGen, refs: []const zir.Inst.Ref) !void {
const coerced = @bitCast([]const u32, refs);
return astgen.extra.appendSlice(astgen.mod.gpa, coerced);
}
pub fn appendRefsAssumeCapacity(astgen: *AstGen, refs: []const zir.Inst.Ref) void {
const coerced = @bitCast([]const u32, refs);
astgen.extra.appendSliceAssumeCapacity(coerced);
}
pub fn refIsNoReturn(astgen: AstGen, inst_ref: zir.Inst.Ref) bool {
if (inst_ref == .unreachable_value) return true;
if (astgen.refToIndex(inst_ref)) |inst_index| {
return astgen.instructions.items(.tag)[inst_index].isNoReturn();
}
return false;
}
pub fn indexToRef(astgen: AstGen, inst: zir.Inst.Index) zir.Inst.Ref {
return @intToEnum(zir.Inst.Ref, astgen.ref_start_index + inst);
}
pub fn refToIndex(astgen: AstGen, inst: zir.Inst.Ref) ?zir.Inst.Index {
const ref_int = @enumToInt(inst);
if (ref_int >= astgen.ref_start_index) {
return ref_int - astgen.ref_start_index;
} else {
return null;
}
}
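// Illustrative round-trip sketch for the Ref <-> Index mapping above (a
// hypothetical test, not part of the original file; the `undefined` fields
// are never touched by these functions).
test "indexToRef and refToIndex are inverses above ref_start_index" {
    var astgen: AstGen = .{ .mod = undefined, .decl = undefined, .arena = undefined };
    const inst: zir.Inst.Index = 42;
    assert(astgen.refToIndex(astgen.indexToRef(inst)).? == inst);
    // Refs below `ref_start_index` denote fixed typed values, not instructions.
    assert(astgen.refToIndex(.unreachable_value) == null);
    assert(astgen.refIsNoReturn(.unreachable_value));
}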
pub fn deinit(astgen: *AstGen) void {
const gpa = astgen.mod.gpa;
astgen.instructions.deinit(gpa);
astgen.extra.deinit(gpa);
astgen.string_bytes.deinit(gpa);
astgen.decl_map.deinit(gpa);
astgen.decls.deinit(gpa);
}
pub const ResultLoc = union(enum) {
/// The expression is the right-hand side of assignment to `_`. Only the side-effects of the
/// expression should be generated. The result instruction from the expression must
/// be ignored.
discard,
/// The expression has an inferred type, and it will be evaluated as an rvalue.
none,
/// The expression must generate a pointer rather than a value. For example, the left hand side
/// of an assignment uses this kind of result location.
ref,
/// The expression will be coerced into this type, but it will be evaluated as an rvalue.
ty: zir.Inst.Ref,
/// The expression must store its result into this typed pointer. The result instruction
/// from the expression must be ignored.
ptr: zir.Inst.Ref,
/// The expression must store its result into this allocation, which has an inferred type.
/// The result instruction from the expression must be ignored.
/// Always an instruction with tag `alloc_inferred`.
inferred_ptr: zir.Inst.Ref,
/// There is a pointer for the expression to store its result into, however, its type
/// is inferred based on peer type resolution for a `zir.Inst.Block`.
/// The result instruction from the expression must be ignored.
block_ptr: *GenZir,
pub const Strategy = struct {
elide_store_to_block_ptr_instructions: bool,
tag: Tag,
pub const Tag = enum {
/// Both branches will use break_void; result location is used to communicate the
/// result instruction.
break_void,
/// Use break statements to pass the block result value, and call rvalue() at
/// the end depending on rl. Also elide the store_to_block_ptr instructions
/// depending on rl.
break_operand,
};
};
fn strategy(rl: ResultLoc, block_scope: *GenZir) Strategy {
var elide_store_to_block_ptr_instructions = false;
switch (rl) {
// In this branch there will not be any store_to_block_ptr instructions.
.discard, .none, .ty, .ref => return .{
.tag = .break_operand,
.elide_store_to_block_ptr_instructions = false,
},
// The pointer got passed through to the sub-expressions, so we will use
// break_void here.
// In this branch there will not be any store_to_block_ptr instructions.
.ptr => return .{
.tag = .break_void,
.elide_store_to_block_ptr_instructions = false,
},
.inferred_ptr, .block_ptr => {
if (block_scope.rvalue_rl_count == block_scope.break_count) {
// Neither prong of the if consumed the result location, so we can
// use break instructions to create an rvalue.
return .{
.tag = .break_operand,
.elide_store_to_block_ptr_instructions = true,
};
} else {
// Allow the store_to_block_ptr instructions to remain so that
// semantic analysis can turn them into bitcasts.
return .{
.tag = .break_void,
.elide_store_to_block_ptr_instructions = false,
};
}
},
}
}
};
pub fn typeExpr(gz: *GenZir, scope: *Scope, type_node: ast.Node.Index) InnerError!zir.Inst.Ref {
return expr(gz, scope, .{ .ty = .type_type }, type_node);
}
fn lvalExpr(gz: *GenZir, scope: *Scope, node: ast.Node.Index) InnerError!zir.Inst.Ref {
const tree = gz.tree();
const node_tags = tree.nodes.items(.tag);
const main_tokens = tree.nodes.items(.main_token);
switch (node_tags[node]) {
.root => unreachable,
.@"usingnamespace" => unreachable,
.test_decl => unreachable,
.global_var_decl => unreachable,
.local_var_decl => unreachable,
.simple_var_decl => unreachable,
.aligned_var_decl => unreachable,
.switch_case => unreachable,
.switch_case_one => unreachable,
.container_field_init => unreachable,
.container_field_align => unreachable,
.container_field => unreachable,
.asm_output => unreachable,
.asm_input => unreachable,
.assign,
.assign_bit_and,
.assign_bit_or,
.assign_bit_shift_left,
.assign_bit_shift_right,
.assign_bit_xor,
.assign_div,
.assign_sub,
.assign_sub_wrap,
.assign_mod,
.assign_add,
.assign_add_wrap,
.assign_mul,
.assign_mul_wrap,
.add,
.add_wrap,
.sub,
.sub_wrap,
.mul,
.mul_wrap,
.div,
.mod,
.bit_and,
.bit_or,
.bit_shift_left,
.bit_shift_right,
.bit_xor,
.bang_equal,
.equal_equal,
.greater_than,
.greater_or_equal,
.less_than,
.less_or_equal,
.array_cat,
.array_mult,
.bool_and,
.bool_or,
.@"asm",
.asm_simple,
.string_literal,
.integer_literal,
.call,
.call_comma,
.async_call,
.async_call_comma,
.call_one,
.call_one_comma,
.async_call_one,
.async_call_one_comma,
.unreachable_literal,
.@"return",
.@"if",
.if_simple,
.@"while",
.while_simple,
.while_cont,
.bool_not,
.address_of,
.float_literal,
.undefined_literal,
.true_literal,
.false_literal,
.null_literal,
.optional_type,
.block,
.block_semicolon,
.block_two,
.block_two_semicolon,
.@"break",
.ptr_type_aligned,
.ptr_type_sentinel,
.ptr_type,
.ptr_type_bit_range,
.array_type,
.array_type_sentinel,
.enum_literal,
.multiline_string_literal,
.char_literal,
.@"defer",
.@"errdefer",
.@"catch",
.error_union,
.merge_error_sets,
.switch_range,
.@"await",
.bit_not,
.negation,
.negation_wrap,
.@"resume",
.@"try",
.slice,
.slice_open,
.slice_sentinel,
.array_init_one,
.array_init_one_comma,
.array_init_dot_two,
.array_init_dot_two_comma,
.array_init_dot,
.array_init_dot_comma,
.array_init,
.array_init_comma,
.struct_init_one,
.struct_init_one_comma,
.struct_init_dot_two,
.struct_init_dot_two_comma,
.struct_init_dot,
.struct_init_dot_comma,
.struct_init,
.struct_init_comma,
.@"switch",
.switch_comma,
.@"for",
.for_simple,
.@"suspend",
.@"continue",
.@"anytype",
.fn_proto_simple,
.fn_proto_multi,
.fn_proto_one,
.fn_proto,
.fn_decl,
.anyframe_type,
.anyframe_literal,
.error_set_decl,
.container_decl,
.container_decl_trailing,
.container_decl_two,
.container_decl_two_trailing,
.container_decl_arg,
.container_decl_arg_trailing,
.tagged_union,
.tagged_union_trailing,
.tagged_union_two,
.tagged_union_two_trailing,
.tagged_union_enum_tag,
.tagged_union_enum_tag_trailing,
.@"comptime",
.@"nosuspend",
.error_value,
=> return gz.astgen.mod.failNode(scope, node, "invalid left-hand side to assignment", .{}),
.builtin_call,
.builtin_call_comma,
.builtin_call_two,
.builtin_call_two_comma,
=> {
const builtin_token = main_tokens[node];
const builtin_name = tree.tokenSlice(builtin_token);
// If the builtin is an invalid name, we don't cause an error here; instead
// let it pass, and the error will be "invalid builtin function" later.
if (BuiltinFn.list.get(builtin_name)) |info| {
if (!info.allows_lvalue) {
return gz.astgen.mod.failNode(scope, node, "invalid left-hand side to assignment", .{});
}
}
},
// These can be assigned to.
.unwrap_optional,
.deref,
.field_access,
.array_access,
.identifier,
.grouped_expression,
.@"orelse",
=> {},
}
return expr(gz, scope, .ref, node);
}
/// Turn Zig AST into untyped ZIR instructions.
/// When `rl` is discard, ptr, inferred_ptr, or block_ptr, the result instruction
/// may only be inspected with `refIsNoReturn()`; it must not otherwise be used.
pub fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerError!zir.Inst.Ref {
const mod = gz.astgen.mod;
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const token_tags = tree.tokens.items(.tag);
const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
switch (node_tags[node]) {
.root => unreachable, // Top-level declaration.
.@"usingnamespace" => unreachable, // Top-level declaration.
.test_decl => unreachable, // Top-level declaration.
.container_field_init => unreachable, // Top-level declaration.
.container_field_align => unreachable, // Top-level declaration.
.container_field => unreachable, // Top-level declaration.
.fn_decl => unreachable, // Top-level declaration.
.global_var_decl => unreachable, // Handled in `blockExpr`.
.local_var_decl => unreachable, // Handled in `blockExpr`.
.simple_var_decl => unreachable, // Handled in `blockExpr`.
.aligned_var_decl => unreachable, // Handled in `blockExpr`.
.switch_case => unreachable, // Handled in `switchExpr`.
.switch_case_one => unreachable, // Handled in `switchExpr`.
.switch_range => unreachable, // Handled in `switchExpr`.
.asm_output => unreachable, // Handled in `asmExpr`.
.asm_input => unreachable, // Handled in `asmExpr`.
.assign => {
try assign(gz, scope, node);
return rvalue(gz, scope, rl, .void_value, node);
},
.assign_bit_and => {
try assignOp(gz, scope, node, .bit_and);
return rvalue(gz, scope, rl, .void_value, node);
},
.assign_bit_or => {
try assignOp(gz, scope, node, .bit_or);
return rvalue(gz, scope, rl, .void_value, node);
},
.assign_bit_shift_left => {
try assignOp(gz, scope, node, .shl);
return rvalue(gz, scope, rl, .void_value, node);
},
.assign_bit_shift_right => {
try assignOp(gz, scope, node, .shr);
return rvalue(gz, scope, rl, .void_value, node);
},
.assign_bit_xor => {
try assignOp(gz, scope, node, .xor);
return rvalue(gz, scope, rl, .void_value, node);
},
.assign_div => {
try assignOp(gz, scope, node, .div);
return rvalue(gz, scope, rl, .void_value, node);
},
.assign_sub => {
try assignOp(gz, scope, node, .sub);
return rvalue(gz, scope, rl, .void_value, node);
},
.assign_sub_wrap => {
try assignOp(gz, scope, node, .subwrap);
return rvalue(gz, scope, rl, .void_value, node);
},
.assign_mod => {
try assignOp(gz, scope, node, .mod_rem);
return rvalue(gz, scope, rl, .void_value, node);
},
.assign_add => {
try assignOp(gz, scope, node, .add);
return rvalue(gz, scope, rl, .void_value, node);
},
.assign_add_wrap => {
try assignOp(gz, scope, node, .addwrap);
return rvalue(gz, scope, rl, .void_value, node);
},
.assign_mul => {
try assignOp(gz, scope, node, .mul);
return rvalue(gz, scope, rl, .void_value, node);
},
.assign_mul_wrap => {
try assignOp(gz, scope, node, .mulwrap);
return rvalue(gz, scope, rl, .void_value, node);
},
.add => return simpleBinOp(gz, scope, rl, node, .add),
.add_wrap => return simpleBinOp(gz, scope, rl, node, .addwrap),
.sub => return simpleBinOp(gz, scope, rl, node, .sub),
.sub_wrap => return simpleBinOp(gz, scope, rl, node, .subwrap),
.mul => return simpleBinOp(gz, scope, rl, node, .mul),
.mul_wrap => return simpleBinOp(gz, scope, rl, node, .mulwrap),
.div => return simpleBinOp(gz, scope, rl, node, .div),
.mod => return simpleBinOp(gz, scope, rl, node, .mod_rem),
.bit_and => return simpleBinOp(gz, scope, rl, node, .bit_and),
.bit_or => return simpleBinOp(gz, scope, rl, node, .bit_or),
.bit_shift_left => return simpleBinOp(gz, scope, rl, node, .shl),
.bit_shift_right => return simpleBinOp(gz, scope, rl, node, .shr),
.bit_xor => return simpleBinOp(gz, scope, rl, node, .xor),
.bang_equal => return simpleBinOp(gz, scope, rl, node, .cmp_neq),
.equal_equal => return simpleBinOp(gz, scope, rl, node, .cmp_eq),
.greater_than => return simpleBinOp(gz, scope, rl, node, .cmp_gt),
.greater_or_equal => return simpleBinOp(gz, scope, rl, node, .cmp_gte),
.less_than => return simpleBinOp(gz, scope, rl, node, .cmp_lt),
.less_or_equal => return simpleBinOp(gz, scope, rl, node, .cmp_lte),
.array_cat => return simpleBinOp(gz, scope, rl, node, .array_cat),
.array_mult => return simpleBinOp(gz, scope, rl, node, .array_mul),
.error_union => return simpleBinOp(gz, scope, rl, node, .error_union_type),
.merge_error_sets => return simpleBinOp(gz, scope, rl, node, .merge_error_sets),
.bool_and => return boolBinOp(gz, scope, rl, node, .bool_br_and),
.bool_or => return boolBinOp(gz, scope, rl, node, .bool_br_or),
.bool_not => return boolNot(gz, scope, rl, node),
.bit_not => return bitNot(gz, scope, rl, node),
.negation => return negation(gz, scope, rl, node, .negate),
.negation_wrap => return negation(gz, scope, rl, node, .negate_wrap),
.identifier => return identifier(gz, scope, rl, node),
.asm_simple => return asmExpr(gz, scope, rl, node, tree.asmSimple(node)),
.@"asm" => return asmExpr(gz, scope, rl, node, tree.asmFull(node)),
.string_literal => return stringLiteral(gz, scope, rl, node),
.multiline_string_literal => return multilineStringLiteral(gz, scope, rl, node),
.integer_literal => return integerLiteral(gz, scope, rl, node),
.builtin_call_two, .builtin_call_two_comma => {
if (node_datas[node].lhs == 0) {
const params = [_]ast.Node.Index{};
return builtinCall(gz, scope, rl, node, &params);
} else if (node_datas[node].rhs == 0) {
const params = [_]ast.Node.Index{node_datas[node].lhs};
return builtinCall(gz, scope, rl, node, &params);
} else {
const params = [_]ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs };
return builtinCall(gz, scope, rl, node, &params);
}
},
.builtin_call, .builtin_call_comma => {
const params = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs];
return builtinCall(gz, scope, rl, node, params);
},
.call_one, .call_one_comma, .async_call_one, .async_call_one_comma => {
var params: [1]ast.Node.Index = undefined;
return callExpr(gz, scope, rl, node, tree.callOne(&params, node));
},
.call, .call_comma, .async_call, .async_call_comma => {
return callExpr(gz, scope, rl, node, tree.callFull(node));
},
.unreachable_literal => {
_ = try gz.addAsIndex(.{
.tag = .@"unreachable",
.data = .{ .@"unreachable" = .{
.safety = true,
.src_node = gz.astgen.decl.nodeIndexToRelative(node),
} },
});
return zir.Inst.Ref.unreachable_value;
},
.@"return" => return ret(gz, scope, node),
.field_access => return fieldAccess(gz, scope, rl, node),
.float_literal => return floatLiteral(gz, scope, rl, node),
.if_simple => return ifExpr(gz, scope, rl, node, tree.ifSimple(node)),
.@"if" => return ifExpr(gz, scope, rl, node, tree.ifFull(node)),
.while_simple => return whileExpr(gz, scope, rl, node, tree.whileSimple(node)),
.while_cont => return whileExpr(gz, scope, rl, node, tree.whileCont(node)),
.@"while" => return whileExpr(gz, scope, rl, node, tree.whileFull(node)),
.for_simple => return forExpr(gz, scope, rl, node, tree.forSimple(node)),
.@"for" => return forExpr(gz, scope, rl, node, tree.forFull(node)),
.slice_open => {
const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
const start = try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].rhs);
const result = try gz.addPlNode(.slice_start, node, zir.Inst.SliceStart{
.lhs = lhs,
.start = start,
});
return rvalue(gz, scope, rl, result, node);
},
.slice => {
const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
const extra = tree.extraData(node_datas[node].rhs, ast.Node.Slice);
const start = try expr(gz, scope, .{ .ty = .usize_type }, extra.start);
const end = try expr(gz, scope, .{ .ty = .usize_type }, extra.end);
const result = try gz.addPlNode(.slice_end, node, zir.Inst.SliceEnd{
.lhs = lhs,
.start = start,
.end = end,
});
return rvalue(gz, scope, rl, result, node);
},
.slice_sentinel => {
const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
const extra = tree.extraData(node_datas[node].rhs, ast.Node.SliceSentinel);
const start = try expr(gz, scope, .{ .ty = .usize_type }, extra.start);
const end = try expr(gz, scope, .{ .ty = .usize_type }, extra.end);
const sentinel = try expr(gz, scope, .{ .ty = .usize_type }, extra.sentinel);
const result = try gz.addPlNode(.slice_sentinel, node, zir.Inst.SliceSentinel{
.lhs = lhs,
.start = start,
.end = end,
.sentinel = sentinel,
});
return rvalue(gz, scope, rl, result, node);
},
.deref => {
const lhs = try expr(gz, scope, .none, node_datas[node].lhs);
const result = try gz.addUnNode(.load, lhs, node);
return rvalue(gz, scope, rl, result, node);
},
.address_of => {
const result = try expr(gz, scope, .ref, node_datas[node].lhs);
return rvalue(gz, scope, rl, result, node);
},
.undefined_literal => return rvalue(gz, scope, rl, .undef, node),
.true_literal => return rvalue(gz, scope, rl, .bool_true, node),
.false_literal => return rvalue(gz, scope, rl, .bool_false, node),
.null_literal => return rvalue(gz, scope, rl, .null_value, node),
.optional_type => {
const operand = try typeExpr(gz, scope, node_datas[node].lhs);
const result = try gz.addUnNode(.optional_type, operand, node);
return rvalue(gz, scope, rl, result, node);
},
.unwrap_optional => switch (rl) {
.ref => return gz.addUnNode(
.optional_payload_safe_ptr,
try expr(gz, scope, .ref, node_datas[node].lhs),
node,
),
else => return rvalue(gz, scope, rl, try gz.addUnNode(
.optional_payload_safe,
try expr(gz, scope, .none, node_datas[node].lhs),
node,
), node),
},
.block_two, .block_two_semicolon => {
const statements = [2]ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs };
if (node_datas[node].lhs == 0) {
return blockExpr(gz, scope, rl, node, statements[0..0]);
} else if (node_datas[node].rhs == 0) {
return blockExpr(gz, scope, rl, node, statements[0..1]);
} else {
return blockExpr(gz, scope, rl, node, statements[0..2]);
}
},
.block, .block_semicolon => {
const statements = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs];
return blockExpr(gz, scope, rl, node, statements);
},
.enum_literal => return simpleStrTok(gz, scope, rl, main_tokens[node], node, .enum_literal),
.error_value => return simpleStrTok(gz, scope, rl, node_datas[node].rhs, node, .error_value),
.anyframe_literal => return mod.failNode(scope, node, "async and related features are not yet supported", .{}),
.anyframe_type => return mod.failNode(scope, node, "async and related features are not yet supported", .{}),
.@"catch" => {
const catch_token = main_tokens[node];
const payload_token: ?ast.TokenIndex = if (token_tags[catch_token + 1] == .pipe)
catch_token + 2
else
null;
switch (rl) {
.ref => return orelseCatchExpr(
gz,
scope,
rl,
node,
node_datas[node].lhs,
.is_err_ptr,
.err_union_payload_unsafe_ptr,
.err_union_code_ptr,
node_datas[node].rhs,
payload_token,
),
else => return orelseCatchExpr(
gz,
scope,
rl,
node,
node_datas[node].lhs,
.is_err,
.err_union_payload_unsafe,
.err_union_code,
node_datas[node].rhs,
payload_token,
),
}
},
.@"orelse" => switch (rl) {
.ref => return orelseCatchExpr(
gz,
scope,
rl,
node,
node_datas[node].lhs,
.is_null_ptr,
.optional_payload_unsafe_ptr,
undefined,
node_datas[node].rhs,
null,
),
else => return orelseCatchExpr(
gz,
scope,
rl,
node,
node_datas[node].lhs,
.is_null,
.optional_payload_unsafe,
undefined,
node_datas[node].rhs,
null,
),
},
.ptr_type_aligned => return ptrType(gz, scope, rl, node, tree.ptrTypeAligned(node)),
.ptr_type_sentinel => return ptrType(gz, scope, rl, node, tree.ptrTypeSentinel(node)),
.ptr_type => return ptrType(gz, scope, rl, node, tree.ptrType(node)),
.ptr_type_bit_range => return ptrType(gz, scope, rl, node, tree.ptrTypeBitRange(node)),
.container_decl,
.container_decl_trailing,
=> return containerDecl(gz, scope, rl, node, tree.containerDecl(node)),
.container_decl_two, .container_decl_two_trailing => {
var buffer: [2]ast.Node.Index = undefined;
return containerDecl(gz, scope, rl, node, tree.containerDeclTwo(&buffer, node));
},
.container_decl_arg,
.container_decl_arg_trailing,
=> return containerDecl(gz, scope, rl, node, tree.containerDeclArg(node)),
.tagged_union,
.tagged_union_trailing,
=> return containerDecl(gz, scope, rl, node, tree.taggedUnion(node)),
.tagged_union_two, .tagged_union_two_trailing => {
var buffer: [2]ast.Node.Index = undefined;
return containerDecl(gz, scope, rl, node, tree.taggedUnionTwo(&buffer, node));
},
.tagged_union_enum_tag,
.tagged_union_enum_tag_trailing,
=> return containerDecl(gz, scope, rl, node, tree.taggedUnionEnumTag(node)),
.@"break" => return breakExpr(gz, scope, node),
.@"continue" => return continueExpr(gz, scope, node),
.grouped_expression => return expr(gz, scope, rl, node_datas[node].lhs),
.array_type => return arrayType(gz, scope, rl, node),
.array_type_sentinel => return arrayTypeSentinel(gz, scope, rl, node),
.char_literal => return charLiteral(gz, scope, rl, node),
.error_set_decl => return errorSetDecl(gz, scope, rl, node),
.array_access => return arrayAccess(gz, scope, rl, node),
.@"comptime" => return comptimeExpr(gz, scope, rl, node_datas[node].lhs),
.@"switch", .switch_comma => return switchExpr(gz, scope, rl, node),
.@"nosuspend" => return mod.failNode(scope, node, "async and related features are not yet supported", .{}),
.@"suspend" => return mod.failNode(scope, node, "async and related features are not yet supported", .{}),
.@"await" => return mod.failNode(scope, node, "async and related features are not yet supported", .{}),
.@"resume" => return mod.failNode(scope, node, "async and related features are not yet supported", .{}),
.@"defer" => return mod.failNode(scope, node, "TODO implement astgen.expr for .defer", .{}),
.@"errdefer" => return mod.failNode(scope, node, "TODO implement astgen.expr for .errdefer", .{}),
.@"try" => return mod.failNode(scope, node, "TODO implement astgen.expr for .Try", .{}),
.array_init_one,
.array_init_one_comma,
.array_init_dot_two,
.array_init_dot_two_comma,
.array_init_dot,
.array_init_dot_comma,
.array_init,
.array_init_comma,
=> return mod.failNode(scope, node, "TODO implement astgen.expr for array literals", .{}),
.struct_init_one, .struct_init_one_comma => {
var fields: [1]ast.Node.Index = undefined;
return structInitExpr(gz, scope, rl, node, tree.structInitOne(&fields, node));
},
.struct_init_dot_two, .struct_init_dot_two_comma => {
var fields: [2]ast.Node.Index = undefined;
return structInitExpr(gz, scope, rl, node, tree.structInitDotTwo(&fields, node));
},
.struct_init_dot,
.struct_init_dot_comma,
=> return structInitExpr(gz, scope, rl, node, tree.structInitDot(node)),
.struct_init,
.struct_init_comma,
=> return structInitExpr(gz, scope, rl, node, tree.structInit(node)),
.@"anytype" => return mod.failNode(scope, node, "TODO implement astgen.expr for .anytype", .{}),
.fn_proto_simple,
.fn_proto_multi,
.fn_proto_one,
.fn_proto,
=> return mod.failNode(scope, node, "TODO implement astgen.expr for function prototypes", .{}),
}
}
pub fn structInitExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
struct_init: ast.full.StructInit,
) InnerError!zir.Inst.Ref {
const tree = gz.tree();
const astgen = gz.astgen;
const mod = astgen.mod;
const gpa = mod.gpa;
if (struct_init.ast.fields.len == 0) {
if (struct_init.ast.type_expr == 0) {
return rvalue(gz, scope, rl, .empty_struct, node);
} else {
const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
const result = try gz.addUnNode(.struct_init_empty, ty_inst, node);
return rvalue(gz, scope, rl, result, node);
}
}
switch (rl) {
.discard => return mod.failNode(scope, node, "TODO implement structInitExpr discard", .{}),
.none => return mod.failNode(scope, node, "TODO implement structInitExpr none", .{}),
.ref => unreachable, // struct literal not valid as l-value
.ty => |ty_inst| {
return mod.failNode(scope, node, "TODO implement structInitExpr ty", .{});
},
.ptr => |ptr_inst| {
const field_ptr_list = try gpa.alloc(zir.Inst.Index, struct_init.ast.fields.len);
defer gpa.free(field_ptr_list);
for (struct_init.ast.fields) |field_init, i| {
const name_token = tree.firstToken(field_init) - 2;
const str_index = try gz.identAsString(name_token);
const field_ptr = try gz.addPlNode(.field_ptr, field_init, zir.Inst.Field{
.lhs = ptr_inst,
.field_name_start = str_index,
});
field_ptr_list[i] = astgen.refToIndex(field_ptr).?;
_ = try expr(gz, scope, .{ .ptr = field_ptr }, field_init);
}
const validate_inst = try gz.addPlNode(.validate_struct_init_ptr, node, zir.Inst.Block{
.body_len = @intCast(u32, field_ptr_list.len),
});
try astgen.extra.appendSlice(gpa, field_ptr_list);
return validate_inst;
},
.inferred_ptr => |ptr_inst| {
return mod.failNode(scope, node, "TODO implement structInitExpr inferred_ptr", .{});
},
.block_ptr => |block_gz| {
return mod.failNode(scope, node, "TODO implement structInitExpr block", .{});
},
}
}
pub fn comptimeExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
) InnerError!zir.Inst.Ref {
const prev_force_comptime = gz.force_comptime;
gz.force_comptime = true;
const result = try expr(gz, scope, rl, node);
gz.force_comptime = prev_force_comptime;
return result;
}
fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: ast.Node.Index) InnerError!zir.Inst.Ref {
const mod = parent_gz.astgen.mod;
const tree = parent_gz.tree();
const node_datas = tree.nodes.items(.data);
const break_label = node_datas[node].lhs;
const rhs = node_datas[node].rhs;
// Look for the label in the scope.
var scope = parent_scope;
while (true) {
switch (scope.tag) {
.gen_zir => {
const block_gz = scope.cast(GenZir).?;
const block_inst = blk: {
if (break_label != 0) {
if (block_gz.label) |*label| {
if (try tokenIdentEql(mod, parent_scope, label.token, break_label)) {
label.used = true;
break :blk label.block_inst;
}
}
} else if (block_gz.break_block != 0) {
break :blk block_gz.break_block;
}
scope = block_gz.parent;
continue;
};
if (rhs == 0) {
_ = try parent_gz.addBreak(.@"break", block_inst, .void_value);
return zir.Inst.Ref.unreachable_value;
}
block_gz.break_count += 1;
const prev_rvalue_rl_count = block_gz.rvalue_rl_count;
const operand = try expr(parent_gz, parent_scope, block_gz.break_result_loc, rhs);
const have_store_to_block = block_gz.rvalue_rl_count != prev_rvalue_rl_count;
const br = try parent_gz.addBreak(.@"break", block_inst, operand);
if (block_gz.break_result_loc == .block_ptr) {
try block_gz.labeled_breaks.append(mod.gpa, br);
if (have_store_to_block) {
const zir_tags = parent_gz.astgen.instructions.items(.tag);
const zir_datas = parent_gz.astgen.instructions.items(.data);
const store_inst = @intCast(u32, zir_tags.len - 2);
assert(zir_tags[store_inst] == .store_to_block_ptr);
assert(zir_datas[store_inst].bin.lhs == block_gz.rl_ptr);
try block_gz.labeled_store_to_block_ptr_list.append(mod.gpa, store_inst);
}
}
return zir.Inst.Ref.unreachable_value;
},
.local_val => scope = scope.cast(Scope.LocalVal).?.parent,
.local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
else => if (break_label != 0) {
const label_name = try mod.identifierTokenString(parent_scope, break_label);
return mod.failTok(parent_scope, break_label, "label not found: '{s}'", .{label_name});
} else {
return mod.failNode(parent_scope, node, "break expression outside loop", .{});
},
}
}
}
fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: ast.Node.Index) InnerError!zir.Inst.Ref {
const mod = parent_gz.astgen.mod;
const tree = parent_gz.tree();
const node_datas = tree.nodes.items(.data);
const break_label = node_datas[node].lhs;
// Look for the label in the scope.
var scope = parent_scope;
while (true) {
switch (scope.tag) {
.gen_zir => {
const gen_zir = scope.cast(GenZir).?;
const continue_block = gen_zir.continue_block;
if (continue_block == 0) {
scope = gen_zir.parent;
continue;
}
if (break_label != 0) blk: {
if (gen_zir.label) |*label| {
if (try tokenIdentEql(mod, parent_scope, label.token, break_label)) {
label.used = true;
break :blk;
}
}
// This loop has a different label (or none), so it is not the continue target; keep searching.
scope = gen_zir.parent;
continue;
}
// TODO emit a break_inline if the loop being continued is inline
_ = try parent_gz.addBreak(.@"break", continue_block, .void_value);
return zir.Inst.Ref.unreachable_value;
},
.local_val => scope = scope.cast(Scope.LocalVal).?.parent,
.local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
else => if (break_label != 0) {
const label_name = try mod.identifierTokenString(parent_scope, break_label);
return mod.failTok(parent_scope, break_label, "label not found: '{s}'", .{label_name});
} else {
return mod.failNode(parent_scope, node, "continue expression outside loop", .{});
},
}
}
}
pub fn blockExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
block_node: ast.Node.Index,
statements: []const ast.Node.Index,
) InnerError!zir.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const token_tags = tree.tokens.items(.tag);
const lbrace = main_tokens[block_node];
if (token_tags[lbrace - 1] == .colon and
token_tags[lbrace - 2] == .identifier)
{
return labeledBlockExpr(gz, scope, rl, block_node, statements, .block);
}
try blockExprStmts(gz, scope, block_node, statements);
return rvalue(gz, scope, rl, .void_value, block_node);
}
fn checkLabelRedefinition(mod: *Module, parent_scope: *Scope, label: ast.TokenIndex) !void {
// Look for the label in the scope.
var scope = parent_scope;
while (true) {
switch (scope.tag) {
.gen_zir => {
const gen_zir = scope.cast(GenZir).?;
if (gen_zir.label) |prev_label| {
if (try tokenIdentEql(mod, parent_scope, label, prev_label.token)) {
const tree = parent_scope.tree();
const main_tokens = tree.nodes.items(.main_token);
const label_name = try mod.identifierTokenString(parent_scope, label);
const msg = msg: {
const msg = try mod.errMsg(
parent_scope,
gen_zir.tokSrcLoc(label),
"redefinition of label '{s}'",
.{label_name},
);
errdefer msg.destroy(mod.gpa);
try mod.errNote(
parent_scope,
gen_zir.tokSrcLoc(prev_label.token),
msg,
"previous definition is here",
.{},
);
break :msg msg;
};
return mod.failWithOwnedErrorMsg(parent_scope, msg);
}
}
scope = gen_zir.parent;
},
.local_val => scope = scope.cast(Scope.LocalVal).?.parent,
.local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
else => return,
}
}
}
fn labeledBlockExpr(
gz: *GenZir,
parent_scope: *Scope,
rl: ResultLoc,
block_node: ast.Node.Index,
statements: []const ast.Node.Index,
zir_tag: zir.Inst.Tag,
) InnerError!zir.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
assert(zir_tag == .block);
const mod = gz.astgen.mod;
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const token_tags = tree.tokens.items(.tag);
const lbrace = main_tokens[block_node];
const label_token = lbrace - 2;
assert(token_tags[label_token] == .identifier);
try checkLabelRedefinition(mod, parent_scope, label_token);
// Reserve the Block ZIR instruction index so that we can put it into the GenZir struct
// so that break statements can reference it.
const block_inst = try gz.addBlock(zir_tag, block_node);
try gz.instructions.append(mod.gpa, block_inst);
var block_scope: GenZir = .{
.parent = parent_scope,
.astgen = gz.astgen,
.force_comptime = gz.force_comptime,
.instructions = .{},
// TODO @as here is working around a stage1 miscompilation bug :(
.label = @as(?GenZir.Label, GenZir.Label{
.token = label_token,
.block_inst = block_inst,
}),
};
block_scope.setBreakResultLoc(rl);
defer block_scope.instructions.deinit(mod.gpa);
defer block_scope.labeled_breaks.deinit(mod.gpa);
defer block_scope.labeled_store_to_block_ptr_list.deinit(mod.gpa);
try blockExprStmts(&block_scope, &block_scope.base, block_node, statements);
if (!block_scope.label.?.used) {
return mod.failTok(parent_scope, label_token, "unused block label", .{});
}
const zir_tags = gz.astgen.instructions.items(.tag);
const zir_datas = gz.astgen.instructions.items(.data);
const strat = rl.strategy(&block_scope);
switch (strat.tag) {
.break_void => {
// The code took advantage of the result location as a pointer.
// Turn the break instruction operands into void.
for (block_scope.labeled_breaks.items) |br| {
zir_datas[br].@"break".operand = .void_value;
}
try block_scope.setBlockBody(block_inst);
return gz.astgen.indexToRef(block_inst);
},
.break_operand => {
// All break operands are values that did not use the result location pointer.
if (strat.elide_store_to_block_ptr_instructions) {
for (block_scope.labeled_store_to_block_ptr_list.items) |inst| {
zir_tags[inst] = .elided;
zir_datas[inst] = undefined;
}
// TODO: technically not needed since we changed the tag to .elided, but it
// would be better still to remove the instructions in this list from the body.
}
try block_scope.setBlockBody(block_inst);
const block_ref = gz.astgen.indexToRef(block_inst);
switch (rl) {
.ref => return block_ref,
else => return rvalue(gz, parent_scope, rl, block_ref, block_node),
}
},
}
}
fn blockExprStmts(
gz: *GenZir,
parent_scope: *Scope,
node: ast.Node.Index,
statements: []const ast.Node.Index,
) !void {
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const node_tags = tree.nodes.items(.tag);
var block_arena = std.heap.ArenaAllocator.init(gz.astgen.mod.gpa);
defer block_arena.deinit();
var scope = parent_scope;
for (statements) |statement| {
if (!gz.force_comptime) {
_ = try gz.addNode(.dbg_stmt_node, statement);
}
switch (node_tags[statement]) {
.global_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.globalVarDecl(statement)),
.local_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.localVarDecl(statement)),
.simple_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.simpleVarDecl(statement)),
.aligned_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.alignedVarDecl(statement)),
.assign => try assign(gz, scope, statement),
.assign_bit_and => try assignOp(gz, scope, statement, .bit_and),
.assign_bit_or => try assignOp(gz, scope, statement, .bit_or),
.assign_bit_shift_left => try assignOp(gz, scope, statement, .shl),
.assign_bit_shift_right => try assignOp(gz, scope, statement, .shr),
.assign_bit_xor => try assignOp(gz, scope, statement, .xor),
.assign_div => try assignOp(gz, scope, statement, .div),
.assign_sub => try assignOp(gz, scope, statement, .sub),
.assign_sub_wrap => try assignOp(gz, scope, statement, .subwrap),
.assign_mod => try assignOp(gz, scope, statement, .mod_rem),
.assign_add => try assignOp(gz, scope, statement, .add),
.assign_add_wrap => try assignOp(gz, scope, statement, .addwrap),
.assign_mul => try assignOp(gz, scope, statement, .mul),
.assign_mul_wrap => try assignOp(gz, scope, statement, .mulwrap),
else => {
// We need to emit an error if the result is not `noreturn` or `void`, but
// we want to avoid adding the ZIR instruction if possible for performance.
const maybe_unused_result = try expr(gz, scope, .none, statement);
const elide_check = if (gz.astgen.refToIndex(maybe_unused_result)) |inst| b: {
// Note that this slice becomes invalid as soon as more instructions are
// appended, so it must be re-fetched on every iteration of the enclosing loop.
const zir_tags = gz.astgen.instructions.items(.tag);
switch (zir_tags[inst]) {
.@"const" => {
const tv = gz.astgen.instructions.items(.data)[inst].@"const";
break :b switch (tv.ty.zigTypeTag()) {
.NoReturn, .Void => true,
else => false,
};
},
// For some instructions, swap in a slightly different ZIR tag
// so we can avoid a separate ensure_result_used instruction.
.call_none_chkused => unreachable,
.call_none => {
zir_tags[inst] = .call_none_chkused;
break :b true;
},
.call_chkused => unreachable,
.call => {
zir_tags[inst] = .call_chkused;
break :b true;
},
// ZIR instructions that might be a type other than `noreturn` or `void`.
.add,
.addwrap,
.alloc,
.alloc_mut,
.alloc_inferred,
.alloc_inferred_mut,
.array_cat,
.array_mul,
.array_type,
.array_type_sentinel,
.indexable_ptr_len,
.as,
.as_node,
.@"asm",
.asm_volatile,
.bit_and,
.bitcast,
.bitcast_result_ptr,
.bit_or,
.block,
.block_inline,
.loop,
.bool_br_and,
.bool_br_or,
.bool_not,
.bool_and,
.bool_or,
.call_compile_time,
.cmp_lt,
.cmp_lte,
.cmp_eq,
.cmp_gte,
.cmp_gt,
.cmp_neq,
.coerce_result_ptr,
.decl_ref,
.decl_val,
.load,
.div,
.elem_ptr,
.elem_val,
.elem_ptr_node,
.elem_val_node,
.floatcast,
.field_ptr,
.field_val,
.field_ptr_named,
.field_val_named,
.fn_type,
.fn_type_var_args,
.fn_type_cc,
.fn_type_cc_var_args,
.int,
.intcast,
.int_type,
.is_non_null,
.is_null,
.is_non_null_ptr,
.is_null_ptr,
.is_err,
.is_err_ptr,
.mod_rem,
.mul,
.mulwrap,
.param_type,
.ptrtoint,
.ref,
.ret_ptr,
.ret_type,
.shl,
.shr,
.str,
.sub,
.subwrap,
.negate,
.negate_wrap,
.typeof,
.typeof_elem,
.xor,
.optional_type,
.optional_type_from_ptr_elem,
.optional_payload_safe,
.optional_payload_unsafe,
.optional_payload_safe_ptr,
.optional_payload_unsafe_ptr,
.err_union_payload_safe,
.err_union_payload_unsafe,
.err_union_payload_safe_ptr,
.err_union_payload_unsafe_ptr,
.err_union_code,
.err_union_code_ptr,
.ptr_type,
.ptr_type_simple,
.enum_literal,
.enum_literal_small,
.merge_error_sets,
.error_union_type,
.bit_not,
.error_value,
.error_to_int,
.int_to_error,
.slice_start,
.slice_end,
.slice_sentinel,
.import,
.typeof_peer,
.switch_block,
.switch_block_multi,
.switch_block_else,
.switch_block_else_multi,
.switch_block_under,
.switch_block_under_multi,
.switch_block_ref,
.switch_block_ref_multi,
.switch_block_ref_else,
.switch_block_ref_else_multi,
.switch_block_ref_under,
.switch_block_ref_under_multi,
.switch_capture,
.switch_capture_ref,
.switch_capture_multi,
.switch_capture_multi_ref,
.switch_capture_else,
.switch_capture_else_ref,
.struct_init_empty,
.struct_decl,
.struct_decl_packed,
.struct_decl_extern,
.union_decl,
.enum_decl,
.opaque_decl,
=> break :b false,
// ZIR instructions that are always either `noreturn` or `void`.
.breakpoint,
.dbg_stmt_node,
.ensure_result_used,
.ensure_result_non_error,
.set_eval_branch_quota,
.compile_log,
.ensure_err_payload_void,
.@"break",
.break_inline,
.condbr,
.condbr_inline,
.compile_error,
.ret_node,
.ret_tok,
.ret_coerce,
.@"unreachable",
.elided,
.store,
.store_node,
.store_to_block_ptr,
.store_to_inferred_ptr,
.resolve_inferred_alloc,
.repeat,
.repeat_inline,
.validate_struct_init_ptr,
=> break :b true,
}
} else switch (maybe_unused_result) {
.none => unreachable,
.void_value,
.unreachable_value,
=> true,
else => false,
};
if (!elide_check) {
_ = try gz.addUnNode(.ensure_result_used, maybe_unused_result, statement);
}
},
}
}
}
fn varDecl(
gz: *GenZir,
scope: *Scope,
node: ast.Node.Index,
block_arena: *Allocator,
var_decl: ast.full.VarDecl,
) InnerError!*Scope {
const mod = gz.astgen.mod;
if (var_decl.comptime_token) |comptime_token| {
return mod.failTok(scope, comptime_token, "TODO implement comptime locals", .{});
}
if (var_decl.ast.align_node != 0) {
return mod.failNode(scope, var_decl.ast.align_node, "TODO implement alignment on locals", .{});
}
const astgen = gz.astgen;
const tree = gz.tree();
const token_tags = tree.tokens.items(.tag);
const name_token = var_decl.ast.mut_token + 1;
const name_src = gz.tokSrcLoc(name_token);
const ident_name = try mod.identifierTokenString(scope, name_token);
// Detect shadowing of local variables, including function parameters.
{
var s = scope;
while (true) switch (s.tag) {
.local_val => {
const local_val = s.cast(Scope.LocalVal).?;
if (mem.eql(u8, local_val.name, ident_name)) {
const msg = msg: {
const msg = try mod.errMsg(scope, name_src, "redefinition of '{s}'", .{
ident_name,
});
errdefer msg.destroy(mod.gpa);
try mod.errNote(scope, local_val.src, msg, "previous definition is here", .{});
break :msg msg;
};
return mod.failWithOwnedErrorMsg(scope, msg);
}
s = local_val.parent;
},
.local_ptr => {
const local_ptr = s.cast(Scope.LocalPtr).?;
if (mem.eql(u8, local_ptr.name, ident_name)) {
const msg = msg: {
const msg = try mod.errMsg(scope, name_src, "redefinition of '{s}'", .{
ident_name,
});
errdefer msg.destroy(mod.gpa);
try mod.errNote(scope, local_ptr.src, msg, "previous definition is here", .{});
break :msg msg;
};
return mod.failWithOwnedErrorMsg(scope, msg);
}
s = local_ptr.parent;
},
.gen_zir => s = s.cast(GenZir).?.parent,
else => break,
};
}
// Detect shadowing of namespace variables.
if (mod.lookupDeclName(scope, ident_name)) |_| {
// TODO add note for other definition
return mod.fail(scope, name_src, "redefinition of '{s}'", .{ident_name});
}
if (var_decl.ast.init_node == 0) {
return mod.fail(scope, name_src, "variables must be initialized", .{});
}
switch (token_tags[var_decl.ast.mut_token]) {
.keyword_const => {
// Depending on the kind of AST node the initialization expression is, we may need an lvalue
// or an rvalue as a result location. If it is an rvalue, we can use the instruction as
// the variable; no memory location is needed.
if (!nodeMayNeedMemoryLocation(tree, var_decl.ast.init_node)) {
const result_loc: ResultLoc = if (var_decl.ast.type_node != 0) .{
.ty = try typeExpr(gz, scope, var_decl.ast.type_node),
} else .none;
const init_inst = try expr(gz, scope, result_loc, var_decl.ast.init_node);
const sub_scope = try block_arena.create(Scope.LocalVal);
sub_scope.* = .{
.parent = scope,
.gen_zir = gz,
.name = ident_name,
.inst = init_inst,
.src = name_src,
};
return &sub_scope.base;
}
// Detect whether the initialization expression actually uses the
// result location pointer.
var init_scope: GenZir = .{
.parent = scope,
.force_comptime = gz.force_comptime,
.astgen = astgen,
};
defer init_scope.instructions.deinit(mod.gpa);
var resolve_inferred_alloc: zir.Inst.Ref = .none;
var opt_type_inst: zir.Inst.Ref = .none;
if (var_decl.ast.type_node != 0) {
const type_inst = try typeExpr(gz, &init_scope.base, var_decl.ast.type_node);
opt_type_inst = type_inst;
init_scope.rl_ptr = try init_scope.addUnNode(.alloc, type_inst, node);
init_scope.rl_ty_inst = type_inst;
} else {
const alloc = try init_scope.addUnNode(.alloc_inferred, undefined, node);
resolve_inferred_alloc = alloc;
init_scope.rl_ptr = alloc;
}
const init_result_loc: ResultLoc = .{ .block_ptr = &init_scope };
const init_inst = try expr(&init_scope, &init_scope.base, init_result_loc, var_decl.ast.init_node);
const zir_tags = astgen.instructions.items(.tag);
const zir_datas = astgen.instructions.items(.data);
const parent_zir = &gz.instructions;
if (init_scope.rvalue_rl_count == 1) {
// Result location pointer not used. We don't need an alloc for this
// const local, and type inference becomes trivial.
// Move the init_scope instructions into the parent scope, eliding
// the alloc instruction and the store_to_block_ptr instruction.
const expected_len = parent_zir.items.len + init_scope.instructions.items.len - 2;
try parent_zir.ensureCapacity(mod.gpa, expected_len);
for (init_scope.instructions.items) |src_inst| {
if (astgen.indexToRef(src_inst) == init_scope.rl_ptr) continue;
if (zir_tags[src_inst] == .store_to_block_ptr) {
if (zir_datas[src_inst].bin.lhs == init_scope.rl_ptr) continue;
}
parent_zir.appendAssumeCapacity(src_inst);
}
assert(parent_zir.items.len == expected_len);
const sub_scope = try block_arena.create(Scope.LocalVal);
sub_scope.* = .{
.parent = scope,
.gen_zir = gz,
.name = ident_name,
.inst = init_inst,
.src = name_src,
};
return &sub_scope.base;
}
// The initialization expression took advantage of the result location
// of the const local. In this case we will create an alloc and a LocalPtr for it.
// Move the init_scope instructions into the parent scope, swapping
// store_to_block_ptr for store_to_inferred_ptr.
const expected_len = parent_zir.items.len + init_scope.instructions.items.len;
try parent_zir.ensureCapacity(mod.gpa, expected_len);
for (init_scope.instructions.items) |src_inst| {
if (zir_tags[src_inst] == .store_to_block_ptr) {
if (zir_datas[src_inst].bin.lhs == init_scope.rl_ptr) {
zir_tags[src_inst] = .store_to_inferred_ptr;
}
}
parent_zir.appendAssumeCapacity(src_inst);
}
assert(parent_zir.items.len == expected_len);
if (resolve_inferred_alloc != .none) {
_ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node);
}
const sub_scope = try block_arena.create(Scope.LocalPtr);
sub_scope.* = .{
.parent = scope,
.gen_zir = gz,
.name = ident_name,
.ptr = init_scope.rl_ptr,
.src = name_src,
};
return &sub_scope.base;
},
.keyword_var => {
var resolve_inferred_alloc: zir.Inst.Ref = .none;
const var_data: struct {
result_loc: ResultLoc,
alloc: zir.Inst.Ref,
} = if (var_decl.ast.type_node != 0) a: {
const type_inst = try typeExpr(gz, scope, var_decl.ast.type_node);
const alloc = try gz.addUnNode(.alloc_mut, type_inst, node);
break :a .{ .alloc = alloc, .result_loc = .{ .ptr = alloc } };
} else a: {
const alloc = try gz.addUnNode(.alloc_inferred_mut, undefined, node);
resolve_inferred_alloc = alloc;
break :a .{ .alloc = alloc, .result_loc = .{ .inferred_ptr = alloc } };
};
const init_inst = try expr(gz, scope, var_data.result_loc, var_decl.ast.init_node);
if (resolve_inferred_alloc != .none) {
_ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node);
}
const sub_scope = try block_arena.create(Scope.LocalPtr);
sub_scope.* = .{
.parent = scope,
.gen_zir = gz,
.name = ident_name,
.ptr = var_data.alloc,
.src = name_src,
};
return &sub_scope.base;
},
else => unreachable,
}
}
fn assign(gz: *GenZir, scope: *Scope, infix_node: ast.Node.Index) InnerError!void {
const tree = gz.tree();
const node_datas = tree.nodes.items(.data);
const main_tokens = tree.nodes.items(.main_token);
const node_tags = tree.nodes.items(.tag);
const lhs = node_datas[infix_node].lhs;
const rhs = node_datas[infix_node].rhs;
if (node_tags[lhs] == .identifier) {
// This intentionally does not support `@"_"` syntax.
const ident_name = tree.tokenSlice(main_tokens[lhs]);
if (mem.eql(u8, ident_name, "_")) {
_ = try expr(gz, scope, .discard, rhs);
return;
}
}
const lvalue = try lvalExpr(gz, scope, lhs);
_ = try expr(gz, scope, .{ .ptr = lvalue }, rhs);
}
fn assignOp(
gz: *GenZir,
scope: *Scope,
infix_node: ast.Node.Index,
op_inst_tag: zir.Inst.Tag,
) InnerError!void {
const tree = gz.tree();
const node_datas = tree.nodes.items(.data);
const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs);
const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node);
const lhs_type = try gz.addUnNode(.typeof, lhs, infix_node);
const rhs = try expr(gz, scope, .{ .ty = lhs_type }, node_datas[infix_node].rhs);
const result = try gz.addPlNode(op_inst_tag, infix_node, zir.Inst.Bin{
.lhs = lhs,
.rhs = rhs,
});
_ = try gz.addBin(.store, lhs_ptr, result);
}
fn boolNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerError!zir.Inst.Ref {
const tree = gz.tree();
const node_datas = tree.nodes.items(.data);
const operand = try expr(gz, scope, .{ .ty = .bool_type }, node_datas[node].lhs);
const result = try gz.addUnNode(.bool_not, operand, node);
return rvalue(gz, scope, rl, result, node);
}
fn bitNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerError!zir.Inst.Ref {
const tree = gz.tree();
const node_datas = tree.nodes.items(.data);
const operand = try expr(gz, scope, .none, node_datas[node].lhs);
const result = try gz.addUnNode(.bit_not, operand, node);
return rvalue(gz, scope, rl, result, node);
}
fn negation(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
tag: zir.Inst.Tag,
) InnerError!zir.Inst.Ref {
const tree = gz.tree();
const node_datas = tree.nodes.items(.data);
const operand = try expr(gz, scope, .none, node_datas[node].lhs);
const result = try gz.addUnNode(tag, operand, node);
return rvalue(gz, scope, rl, result, node);
}
fn ptrType(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
ptr_info: ast.full.PtrType,
) InnerError!zir.Inst.Ref {
const tree = gz.tree();
const elem_type = try typeExpr(gz, scope, ptr_info.ast.child_type);
const simple = ptr_info.ast.align_node == 0 and
ptr_info.ast.sentinel == 0 and
ptr_info.ast.bit_range_start == 0;
if (simple) {
const result = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
.ptr_type_simple = .{
.is_allowzero = ptr_info.allowzero_token != null,
.is_mutable = ptr_info.const_token == null,
.is_volatile = ptr_info.volatile_token != null,
.size = ptr_info.size,
.elem_type = elem_type,
},
} });
return rvalue(gz, scope, rl, result, node);
}
var sentinel_ref: zir.Inst.Ref = .none;
var align_ref: zir.Inst.Ref = .none;
var bit_start_ref: zir.Inst.Ref = .none;
var bit_end_ref: zir.Inst.Ref = .none;
var trailing_count: u32 = 0;
if (ptr_info.ast.sentinel != 0) {
sentinel_ref = try expr(gz, scope, .{ .ty = elem_type }, ptr_info.ast.sentinel);
trailing_count += 1;
}
if (ptr_info.ast.align_node != 0) {
align_ref = try expr(gz, scope, .none, ptr_info.ast.align_node);
trailing_count += 1;
}
if (ptr_info.ast.bit_range_start != 0) {
assert(ptr_info.ast.bit_range_end != 0);
bit_start_ref = try expr(gz, scope, .none, ptr_info.ast.bit_range_start);
bit_end_ref = try expr(gz, scope, .none, ptr_info.ast.bit_range_end);
trailing_count += 2;
}
const gpa = gz.astgen.mod.gpa;
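// Reserve all memory up front so the appendAssumeCapacity calls below cannot
// fail partway through: one slot in this block's instruction list, one new ZIR
// instruction, and the PtrType payload plus its optional trailing refs.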
try gz.instructions.ensureCapacity(gpa, gz.instructions.items.len + 1);
try gz.astgen.instructions.ensureCapacity(gpa, gz.astgen.instructions.len + 1);
try gz.astgen.extra.ensureCapacity(gpa, gz.astgen.extra.items.len +
@typeInfo(zir.Inst.PtrType).Struct.fields.len + trailing_count);
const payload_index = gz.astgen.addExtraAssumeCapacity(zir.Inst.PtrType{ .elem_type = elem_type });
if (sentinel_ref != .none) {
gz.astgen.extra.appendAssumeCapacity(@enumToInt(sentinel_ref));
}
if (align_ref != .none) {
gz.astgen.extra.appendAssumeCapacity(@enumToInt(align_ref));
}
if (bit_start_ref != .none) {
gz.astgen.extra.appendAssumeCapacity(@enumToInt(bit_start_ref));
gz.astgen.extra.appendAssumeCapacity(@enumToInt(bit_end_ref));
}
const new_index = @intCast(zir.Inst.Index, gz.astgen.instructions.len);
const result = gz.astgen.indexToRef(new_index);
gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .ptr_type, .data = .{
.ptr_type = .{
.flags = .{
.is_allowzero = ptr_info.allowzero_token != null,
.is_mutable = ptr_info.const_token == null,
.is_volatile = ptr_info.volatile_token != null,
.has_sentinel = sentinel_ref != .none,
.has_align = align_ref != .none,
.has_bit_range = bit_start_ref != .none,
},
.size = ptr_info.size,
.payload_index = payload_index,
},
} });
gz.instructions.appendAssumeCapacity(new_index);
return rvalue(gz, scope, rl, result, node);
}
fn arrayType(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) !zir.Inst.Ref {
const tree = gz.tree();
const node_datas = tree.nodes.items(.data);
// TODO check for [_]T
const len = try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].lhs);
const elem_type = try typeExpr(gz, scope, node_datas[node].rhs);
const result = try gz.addBin(.array_type, len, elem_type);
return rvalue(gz, scope, rl, result, node);
}
fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) !zir.Inst.Ref {
const tree = gz.tree();
const node_datas = tree.nodes.items(.data);
const extra = tree.extraData(node_datas[node].rhs, ast.Node.ArrayTypeSentinel);
// TODO check for [_]T
const len = try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].lhs);
const elem_type = try typeExpr(gz, scope, extra.elem_type);
const sentinel = try expr(gz, scope, .{ .ty = elem_type }, extra.sentinel);
const result = try gz.addArrayTypeSentinel(len, elem_type, sentinel);
return rvalue(gz, scope, rl, result, node);
}
fn containerDecl(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
container_decl: ast.full.ContainerDecl,
) InnerError!zir.Inst.Ref {
const astgen = gz.astgen;
const mod = astgen.mod;
const gpa = mod.gpa;
const tree = gz.tree();
const token_tags = tree.tokens.items(.tag);
const node_tags = tree.nodes.items(.tag);
// We must not create any types until Sema. Here the goal is only to generate
// ZIR for all the field types, alignments, and default value expressions.
const arg_inst: zir.Inst.Ref = if (container_decl.ast.arg != 0)
try comptimeExpr(gz, scope, .none, container_decl.ast.arg)
else
.none;
switch (token_tags[container_decl.ast.main_token]) {
.keyword_struct => {
const tag = if (container_decl.layout_token) |t| switch (token_tags[t]) {
.keyword_packed => zir.Inst.Tag.struct_decl_packed,
.keyword_extern => zir.Inst.Tag.struct_decl_extern,
else => unreachable,
} else zir.Inst.Tag.struct_decl;
if (container_decl.ast.members.len == 0) {
const result = try gz.addPlNode(tag, node, zir.Inst.StructDecl{
.fields_len = 0,
});
return rvalue(gz, scope, rl, result, node);
}
assert(arg_inst == .none);
var fields_data = ArrayListUnmanaged(u32){};
defer fields_data.deinit(gpa);
// field_name and field_type are both mandatory
try fields_data.ensureCapacity(gpa, container_decl.ast.members.len * 2);
// We only need this if there are more than 16 fields.
var bit_bag = ArrayListUnmanaged(u32){};
defer bit_bag.deinit(gpa);
var cur_bit_bag: u32 = 0;
var member_index: usize = 0;
while (true) {
const member_node = container_decl.ast.members[member_index];
const member = switch (node_tags[member_node]) {
.container_field_init => tree.containerFieldInit(member_node),
.container_field_align => tree.containerFieldAlign(member_node),
.container_field => tree.containerField(member_node),
else => unreachable,
};
if (member.comptime_token) |comptime_token| {
return mod.failTok(scope, comptime_token, "TODO implement comptime struct fields", .{});
}
try fields_data.ensureCapacity(gpa, fields_data.items.len + 4);
const field_name = try gz.identAsString(member.ast.name_token);
fields_data.appendAssumeCapacity(field_name);
const field_type = try typeExpr(gz, scope, member.ast.type_expr);
fields_data.appendAssumeCapacity(@enumToInt(field_type));
const have_align = member.ast.align_expr != 0;
const have_value = member.ast.value_expr != 0;
cur_bit_bag = (cur_bit_bag >> 2) |
(@as(u32, @boolToInt(have_align)) << 30) |
(@as(u32, @boolToInt(have_value)) << 31);
if (have_align) {
const align_inst = try comptimeExpr(gz, scope, .{ .ty = .u32_type }, member.ast.align_expr);
fields_data.appendAssumeCapacity(@enumToInt(align_inst));
}
if (have_value) {
const default_inst = try comptimeExpr(gz, scope, .{ .ty = field_type }, member.ast.value_expr);
fields_data.appendAssumeCapacity(@enumToInt(default_inst));
}
member_index += 1;
if (member_index < container_decl.ast.members.len) {
if (member_index % 16 == 0) {
try bit_bag.append(gpa, cur_bit_bag);
cur_bit_bag = 0;
}
} else {
break;
}
}
// The final bag is only partially filled; it holds `((member_index - 1) % 16) + 1`
// entries, so shift them down so the first field's two bits land at the bottom.
const empty_slot_count = 15 - ((member_index - 1) % 16);
cur_bit_bag >>= @intCast(u5, empty_slot_count * 2);
const result = try gz.addPlNode(tag, node, zir.Inst.StructDecl{
.fields_len = @intCast(u32, container_decl.ast.members.len),
});
try astgen.extra.ensureCapacity(gpa, astgen.extra.items.len +
bit_bag.items.len + 1 + fields_data.items.len);
astgen.extra.appendSliceAssumeCapacity(bit_bag.items); // Likely empty.
astgen.extra.appendAssumeCapacity(cur_bit_bag);
astgen.extra.appendSliceAssumeCapacity(fields_data.items);
return rvalue(gz, scope, rl, result, node);
},
.keyword_union => {
return mod.failTok(scope, container_decl.ast.main_token, "TODO AstGen for union decl", .{});
},
.keyword_enum => {
return mod.failTok(scope, container_decl.ast.main_token, "TODO AstGen for enum decl", .{});
},
.keyword_opaque => {
const result = try gz.addNode(.opaque_decl, node);
return rvalue(gz, scope, rl, result, node);
},
else => unreachable,
}
}
fn errorSetDecl(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
) InnerError!zir.Inst.Ref {
const mod = gz.astgen.mod;
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const token_tags = tree.tokens.items(.tag);
const arena = gz.astgen.arena;
// Count how many fields there are.
const error_token = main_tokens[node];
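// For `error{A, B}` the token stream is: `error` `{` ident (`,` ident)* `}`,
// so field identifiers begin at error_token + 2 (doc comments and commas
// are skipped below).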
const count: usize = count: {
var tok_i = error_token + 2;
var count: usize = 0;
while (true) : (tok_i += 1) {
switch (token_tags[tok_i]) {
.doc_comment, .comma => {},
.identifier => count += 1,
.r_brace => break :count count,
else => unreachable,
}
} else unreachable; // TODO should not need else unreachable here
};
const fields = try arena.alloc([]const u8, count);
{
var tok_i = error_token + 2;
var field_i: usize = 0;
while (true) : (tok_i += 1) {
switch (token_tags[tok_i]) {
.doc_comment, .comma => {},
.identifier => {
fields[field_i] = try mod.identifierTokenString(scope, tok_i);
field_i += 1;
},
.r_brace => break,
else => unreachable,
}
}
}
const error_set = try arena.create(Module.ErrorSet);
error_set.* = .{
.owner_decl = gz.astgen.decl,
.node_offset = gz.astgen.decl.nodeIndexToRelative(node),
.names_ptr = fields.ptr,
.names_len = @intCast(u32, fields.len),
};
const error_set_ty = try Type.Tag.error_set.create(arena, error_set);
const typed_value = try arena.create(TypedValue);
typed_value.* = .{
.ty = Type.initTag(.type),
.val = try Value.Tag.ty.create(arena, error_set_ty),
};
const result = try gz.addConst(typed_value);
return rvalue(gz, scope, rl, result, node);
}
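/// Lowers both `lhs orelse rhs` and `lhs catch rhs`/`lhs catch |err| rhs`.
/// The caller parameterizes the condition check (`cond_op`), the unwrap of
/// the success path (`unwrap_op`), and the unwrap of the error code path
/// (`unwrap_code_op`), which binds the `|err|` capture when present.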
fn orelseCatchExpr(
parent_gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
lhs: ast.Node.Index,
cond_op: zir.Inst.Tag,
unwrap_op: zir.Inst.Tag,
unwrap_code_op: zir.Inst.Tag,
rhs: ast.Node.Index,
payload_token: ?ast.TokenIndex,
) InnerError!zir.Inst.Ref {
const mod = parent_gz.astgen.mod;
const tree = parent_gz.tree();
var block_scope: GenZir = .{
.parent = scope,
.astgen = parent_gz.astgen,
.force_comptime = parent_gz.force_comptime,
.instructions = .{},
};
block_scope.setBreakResultLoc(rl);
defer block_scope.instructions.deinit(mod.gpa);
// The operand lowered below could be a pointer or a value, depending on
// `operand_rl`. We cannot use `block_scope.break_result_loc` because that
// has the bare type, whereas this expression has the optional type. Later
// we make up for this by calling rvalue on the else branch.
block_scope.break_count += 1;
// TODO handle catch
const operand_rl: ResultLoc = switch (block_scope.break_result_loc) {
.ref => .ref,
.discard, .none, .block_ptr, .inferred_ptr => .none,
.ty => |elem_ty| blk: {
const wrapped_ty = try block_scope.addUnNode(.optional_type, elem_ty, node);
break :blk .{ .ty = wrapped_ty };
},
.ptr => |ptr_ty| blk: {
const wrapped_ty = try block_scope.addUnNode(.optional_type_from_ptr_elem, ptr_ty, node);
break :blk .{ .ty = wrapped_ty };
},
};
const operand = try expr(&block_scope, &block_scope.base, operand_rl, lhs);
const cond = try block_scope.addUnNode(cond_op, operand, node);
const condbr = try block_scope.addCondBr(.condbr, node);
const block = try parent_gz.addBlock(.block, node);
try parent_gz.instructions.append(mod.gpa, block);
try block_scope.setBlockBody(block);
var then_scope: GenZir = .{
.parent = scope,
.astgen = parent_gz.astgen,
.force_comptime = block_scope.force_comptime,
.instructions = .{},
};
defer then_scope.instructions.deinit(mod.gpa);
var err_val_scope: Scope.LocalVal = undefined;
const then_sub_scope = blk: {
const payload = payload_token orelse break :blk &then_scope.base;
if (mem.eql(u8, tree.tokenSlice(payload), "_")) {
return mod.failTok(&then_scope.base, payload, "discard of error capture; omit it instead", .{});
}
const err_name = try mod.identifierTokenString(scope, payload);
err_val_scope = .{
.parent = &then_scope.base,
.gen_zir = &then_scope,
.name = err_name,
.inst = try then_scope.addUnNode(unwrap_code_op, operand, node),
.src = parent_gz.tokSrcLoc(payload),
};
break :blk &err_val_scope.base;
};
block_scope.break_count += 1;
const then_result = try expr(&then_scope, then_sub_scope, block_scope.break_result_loc, rhs);
// We hold off on the break instructions as well as copying the then/else
// instructions into place until we know whether to keep store_to_block_ptr
// instructions or not.
var else_scope: GenZir = .{
.parent = scope,
.astgen = parent_gz.astgen,
.force_comptime = block_scope.force_comptime,
.instructions = .{},
};
defer else_scope.instructions.deinit(mod.gpa);
// This could be a pointer or value depending on `unwrap_op`.
const unwrapped_payload = try else_scope.addUnNode(unwrap_op, operand, node);
const else_result = switch (rl) {
.ref => unwrapped_payload,
else => try rvalue(&else_scope, &else_scope.base, block_scope.break_result_loc, unwrapped_payload, node),
};
return finishThenElseBlock(
parent_gz,
scope,
rl,
node,
&block_scope,
&then_scope,
&else_scope,
condbr,
cond,
node,
node,
then_result,
else_result,
block,
block,
.@"break",
);
}
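/// Shared epilogue for if/while/for/orelse/catch lowering: emits the break
/// instructions for both branches, copies the then/else bodies into the
/// condbr payload, and decides (via `rl.strategy`) whether the result is
/// communicated through break operands or a result location pointer.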
fn finishThenElseBlock(
parent_gz: *GenZir,
parent_scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
block_scope: *GenZir,
then_scope: *GenZir,
else_scope: *GenZir,
condbr: zir.Inst.Index,
cond: zir.Inst.Ref,
then_src: ast.Node.Index,
else_src: ast.Node.Index,
then_result: zir.Inst.Ref,
else_result: zir.Inst.Ref,
main_block: zir.Inst.Index,
then_break_block: zir.Inst.Index,
break_tag: zir.Inst.Tag,
) InnerError!zir.Inst.Ref {
// We now have enough information to decide whether the result instruction should
// be communicated via result location pointer or break instructions.
const strat = rl.strategy(block_scope);
const astgen = block_scope.astgen;
switch (strat.tag) {
.break_void => {
if (!astgen.refIsNoReturn(then_result)) {
_ = try then_scope.addBreak(break_tag, then_break_block, .void_value);
}
const elide_else = if (else_result != .none) astgen.refIsNoReturn(else_result) else false;
if (!elide_else) {
_ = try else_scope.addBreak(break_tag, main_block, .void_value);
}
assert(!strat.elide_store_to_block_ptr_instructions);
try setCondBrPayload(condbr, cond, then_scope, else_scope);
return astgen.indexToRef(main_block);
},
.break_operand => {
if (!astgen.refIsNoReturn(then_result)) {
_ = try then_scope.addBreak(break_tag, then_break_block, then_result);
}
if (else_result != .none) {
if (!astgen.refIsNoReturn(else_result)) {
_ = try else_scope.addBreak(break_tag, main_block, else_result);
}
} else {
_ = try else_scope.addBreak(break_tag, main_block, .void_value);
}
if (strat.elide_store_to_block_ptr_instructions) {
try setCondBrPayloadElideBlockStorePtr(condbr, cond, then_scope, else_scope);
} else {
try setCondBrPayload(condbr, cond, then_scope, else_scope);
}
const block_ref = astgen.indexToRef(main_block);
switch (rl) {
.ref => return block_ref,
else => return rvalue(parent_gz, parent_scope, rl, block_ref, node),
}
},
}
}
/// Return whether the identifier names of two tokens are equal.
/// In theory this could resolve @"" tokens without allocating, but this
/// implementation allocates when the @"" form is used.
fn tokenIdentEql(mod: *Module, scope: *Scope, token1: ast.TokenIndex, token2: ast.TokenIndex) !bool {
const ident_name_1 = try mod.identifierTokenString(scope, token1);
const ident_name_2 = try mod.identifierTokenString(scope, token2);
return mem.eql(u8, ident_name_1, ident_name_2);
}
pub fn fieldAccess(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
) InnerError!zir.Inst.Ref {
const astgen = gz.astgen;
const mod = astgen.mod;
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const node_datas = tree.nodes.items(.data);
const object_node = node_datas[node].lhs;
const dot_token = main_tokens[node];
const field_ident = dot_token + 1;
const str_index = try gz.identAsString(field_ident);
switch (rl) {
.ref => return gz.addPlNode(.field_ptr, node, zir.Inst.Field{
.lhs = try expr(gz, scope, .ref, object_node),
.field_name_start = str_index,
}),
else => return rvalue(gz, scope, rl, try gz.addPlNode(.field_val, node, zir.Inst.Field{
.lhs = try expr(gz, scope, .none, object_node),
.field_name_start = str_index,
}), node),
}
}
fn arrayAccess(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
) InnerError!zir.Inst.Ref {
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const node_datas = tree.nodes.items(.data);
switch (rl) {
.ref => return gz.addBin(
.elem_ptr,
try expr(gz, scope, .ref, node_datas[node].lhs),
try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].rhs),
),
else => return rvalue(gz, scope, rl, try gz.addBin(
.elem_val,
try expr(gz, scope, .none, node_datas[node].lhs),
try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].rhs),
), node),
}
}
fn simpleBinOp(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
op_inst_tag: zir.Inst.Tag,
) InnerError!zir.Inst.Ref {
const tree = gz.tree();
const node_datas = tree.nodes.items(.data);
const result = try gz.addPlNode(op_inst_tag, node, zir.Inst.Bin{
.lhs = try expr(gz, scope, .none, node_datas[node].lhs),
.rhs = try expr(gz, scope, .none, node_datas[node].rhs),
});
return rvalue(gz, scope, rl, result, node);
}
fn simpleStrTok(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
ident_token: ast.TokenIndex,
node: ast.Node.Index,
op_inst_tag: zir.Inst.Tag,
) InnerError!zir.Inst.Ref {
const str_index = try gz.identAsString(ident_token);
const result = try gz.addStrTok(op_inst_tag, str_index, ident_token);
return rvalue(gz, scope, rl, result, node);
}
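/// Lowers `and`/`or` with short-circuit evaluation: the RHS is emitted into
/// its own body, which the `bool_br` instruction executes only when the LHS
/// does not already determine the result.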
fn boolBinOp(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
zir_tag: zir.Inst.Tag,
) InnerError!zir.Inst.Ref {
const node_datas = gz.tree().nodes.items(.data);
const lhs = try expr(gz, scope, .{ .ty = .bool_type }, node_datas[node].lhs);
const bool_br = try gz.addBoolBr(zir_tag, lhs);
var rhs_scope: GenZir = .{
.parent = scope,
.astgen = gz.astgen,
.force_comptime = gz.force_comptime,
};
defer rhs_scope.instructions.deinit(gz.astgen.mod.gpa);
const rhs = try expr(&rhs_scope, &rhs_scope.base, .{ .ty = .bool_type }, node_datas[node].rhs);
_ = try rhs_scope.addBreak(.break_inline, bool_br, rhs);
try rhs_scope.setBoolBrBody(bool_br);
const block_ref = gz.astgen.indexToRef(bool_br);
return rvalue(gz, scope, rl, block_ref, node);
}
fn ifExpr(
parent_gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
if_full: ast.full.If,
) InnerError!zir.Inst.Ref {
const mod = parent_gz.astgen.mod;
var block_scope: GenZir = .{
.parent = scope,
.astgen = parent_gz.astgen,
.force_comptime = parent_gz.force_comptime,
.instructions = .{},
};
block_scope.setBreakResultLoc(rl);
defer block_scope.instructions.deinit(mod.gpa);
const cond = c: {
// TODO https://github.com/ziglang/zig/issues/7929
if (if_full.error_token) |error_token| {
return mod.failTok(scope, error_token, "TODO implement if error union", .{});
} else if (if_full.payload_token) |payload_token| {
return mod.failTok(scope, payload_token, "TODO implement if optional", .{});
} else {
break :c try expr(&block_scope, &block_scope.base, .{ .ty = .bool_type }, if_full.ast.cond_expr);
}
};
const condbr = try block_scope.addCondBr(.condbr, node);
const block = try parent_gz.addBlock(.block, node);
try parent_gz.instructions.append(mod.gpa, block);
try block_scope.setBlockBody(block);
var then_scope: GenZir = .{
.parent = scope,
.astgen = parent_gz.astgen,
.force_comptime = block_scope.force_comptime,
.instructions = .{},
};
defer then_scope.instructions.deinit(mod.gpa);
// declare payload to the then_scope
const then_sub_scope = &then_scope.base;
block_scope.break_count += 1;
const then_result = try expr(&then_scope, then_sub_scope, block_scope.break_result_loc, if_full.ast.then_expr);
// We hold off on the break instructions as well as copying the then/else
// instructions into place until we know whether to keep store_to_block_ptr
// instructions or not.
var else_scope: GenZir = .{
.parent = scope,
.astgen = parent_gz.astgen,
.force_comptime = block_scope.force_comptime,
.instructions = .{},
};
defer else_scope.instructions.deinit(mod.gpa);
const else_node = if_full.ast.else_expr;
const else_info: struct {
src: ast.Node.Index,
result: zir.Inst.Ref,
} = if (else_node != 0) blk: {
block_scope.break_count += 1;
const sub_scope = &else_scope.base;
break :blk .{
.src = else_node,
.result = try expr(&else_scope, sub_scope, block_scope.break_result_loc, else_node),
};
} else .{
.src = if_full.ast.then_expr,
.result = .none,
};
return finishThenElseBlock(
parent_gz,
scope,
rl,
node,
&block_scope,
&then_scope,
&else_scope,
condbr,
cond,
if_full.ast.then_expr,
else_info.src,
then_result,
else_info.result,
block,
block,
.@"break",
);
}
fn setCondBrPayload(
condbr: zir.Inst.Index,
cond: zir.Inst.Ref,
then_scope: *GenZir,
else_scope: *GenZir,
) !void {
const astgen = then_scope.astgen;
try astgen.extra.ensureCapacity(astgen.mod.gpa, astgen.extra.items.len +
@typeInfo(zir.Inst.CondBr).Struct.fields.len +
then_scope.instructions.items.len + else_scope.instructions.items.len);
const zir_datas = astgen.instructions.items(.data);
zir_datas[condbr].pl_node.payload_index = astgen.addExtraAssumeCapacity(zir.Inst.CondBr{
.condition = cond,
.then_body_len = @intCast(u32, then_scope.instructions.items.len),
.else_body_len = @intCast(u32, else_scope.instructions.items.len),
});
astgen.extra.appendSliceAssumeCapacity(then_scope.instructions.items);
astgen.extra.appendSliceAssumeCapacity(else_scope.instructions.items);
}
/// Like `setCondBrPayload`, but expects to find exactly one
/// .store_to_block_ptr instruction in each of `then_scope` and `else_scope`,
/// and elides them while copying the bodies into place.
fn setCondBrPayloadElideBlockStorePtr(
condbr: zir.Inst.Index,
cond: zir.Inst.Ref,
then_scope: *GenZir,
else_scope: *GenZir,
) !void {
const astgen = then_scope.astgen;
try astgen.extra.ensureCapacity(astgen.mod.gpa, astgen.extra.items.len +
@typeInfo(zir.Inst.CondBr).Struct.fields.len +
then_scope.instructions.items.len + else_scope.instructions.items.len - 2);
const zir_datas = astgen.instructions.items(.data);
zir_datas[condbr].pl_node.payload_index = astgen.addExtraAssumeCapacity(zir.Inst.CondBr{
.condition = cond,
.then_body_len = @intCast(u32, then_scope.instructions.items.len - 1),
.else_body_len = @intCast(u32, else_scope.instructions.items.len - 1),
});
const zir_tags = astgen.instructions.items(.tag);
for ([_]*GenZir{ then_scope, else_scope }) |scope| {
for (scope.instructions.items) |src_inst| {
if (zir_tags[src_inst] != .store_to_block_ptr) {
astgen.extra.appendAssumeCapacity(src_inst);
}
}
}
}
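/// Lowers a `while` loop: an outer `loop` block contains a condition block
/// whose condbr either runs the body or breaks out of the loop. `break`
/// targets the outer loop block; `continue` exits the condition block,
/// falling through to the continue expression (if any) and the `repeat`.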
fn whileExpr(
parent_gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
while_full: ast.full.While,
) InnerError!zir.Inst.Ref {
const mod = parent_gz.astgen.mod;
if (while_full.label_token) |label_token| {
try checkLabelRedefinition(mod, scope, label_token);
}
const is_inline = parent_gz.force_comptime or while_full.inline_token != null;
const loop_tag: zir.Inst.Tag = if (is_inline) .block_inline else .loop;
const loop_block = try parent_gz.addBlock(loop_tag, node);
try parent_gz.instructions.append(mod.gpa, loop_block);
var loop_scope: GenZir = .{
.parent = scope,
.astgen = parent_gz.astgen,
.force_comptime = parent_gz.force_comptime,
.instructions = .{},
};
loop_scope.setBreakResultLoc(rl);
defer loop_scope.instructions.deinit(mod.gpa);
var continue_scope: GenZir = .{
.parent = &loop_scope.base,
.astgen = parent_gz.astgen,
.force_comptime = loop_scope.force_comptime,
.instructions = .{},
};
defer continue_scope.instructions.deinit(mod.gpa);
const cond = c: {
// TODO https://github.com/ziglang/zig/issues/7929
if (while_full.error_token) |error_token| {
return mod.failTok(scope, error_token, "TODO implement while error union", .{});
} else if (while_full.payload_token) |payload_token| {
return mod.failTok(scope, payload_token, "TODO implement while optional", .{});
} else {
const bool_type_rl: ResultLoc = .{ .ty = .bool_type };
break :c try expr(&continue_scope, &continue_scope.base, bool_type_rl, while_full.ast.cond_expr);
}
};
const condbr_tag: zir.Inst.Tag = if (is_inline) .condbr_inline else .condbr;
const condbr = try continue_scope.addCondBr(condbr_tag, node);
const block_tag: zir.Inst.Tag = if (is_inline) .block_inline else .block;
const cond_block = try loop_scope.addBlock(block_tag, node);
try loop_scope.instructions.append(mod.gpa, cond_block);
try continue_scope.setBlockBody(cond_block);
// TODO avoid emitting the continue expr when there are no jumps to it. This
// happens when the last statement of a while body is noreturn and there are
// no `continue` statements.
if (while_full.ast.cont_expr != 0) {
_ = try expr(&loop_scope, &loop_scope.base, .{ .ty = .void_type }, while_full.ast.cont_expr);
}
const repeat_tag: zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat;
_ = try loop_scope.addNode(repeat_tag, node);
try loop_scope.setBlockBody(loop_block);
loop_scope.break_block = loop_block;
loop_scope.continue_block = cond_block;
if (while_full.label_token) |label_token| {
loop_scope.label = @as(?GenZir.Label, GenZir.Label{
.token = label_token,
.block_inst = loop_block,
});
}
var then_scope: GenZir = .{
.parent = &continue_scope.base,
.astgen = parent_gz.astgen,
.force_comptime = continue_scope.force_comptime,
.instructions = .{},
};
defer then_scope.instructions.deinit(mod.gpa);
const then_sub_scope = &then_scope.base;
loop_scope.break_count += 1;
const then_result = try expr(&then_scope, then_sub_scope, loop_scope.break_result_loc, while_full.ast.then_expr);
var else_scope: GenZir = .{
.parent = &continue_scope.base,
.astgen = parent_gz.astgen,
.force_comptime = continue_scope.force_comptime,
.instructions = .{},
};
defer else_scope.instructions.deinit(mod.gpa);
const else_node = while_full.ast.else_expr;
const else_info: struct {
src: ast.Node.Index,
result: zir.Inst.Ref,
} = if (else_node != 0) blk: {
loop_scope.break_count += 1;
const sub_scope = &else_scope.base;
break :blk .{
.src = else_node,
.result = try expr(&else_scope, sub_scope, loop_scope.break_result_loc, else_node),
};
} else .{
.src = while_full.ast.then_expr,
.result = .none,
};
if (loop_scope.label) |some| {
if (!some.used) {
return mod.failTok(scope, some.token, "unused while loop label", .{});
}
}
const break_tag: zir.Inst.Tag = if (is_inline) .break_inline else .@"break";
return finishThenElseBlock(
parent_gz,
scope,
rl,
node,
&loop_scope,
&then_scope,
&else_scope,
condbr,
cond,
while_full.ast.then_expr,
else_info.src,
then_result,
else_info.result,
loop_block,
cond_block,
break_tag,
);
}
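/// Lowers a `for` loop by desugaring it to an index-based loop: a `usize`
/// index is allocated and initialized to zero, a condition block checks
/// `index < len`, and the index is incremented after the body before the
/// loop repeats.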
fn forExpr(
parent_gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
for_full: ast.full.While,
) InnerError!zir.Inst.Ref {
const mod = parent_gz.astgen.mod;
if (for_full.label_token) |label_token| {
try checkLabelRedefinition(mod, scope, label_token);
}
// Set up variables and constants.
const is_inline = parent_gz.force_comptime or for_full.inline_token != null;
const tree = parent_gz.tree();
const token_tags = tree.tokens.items(.tag);
const array_ptr = try expr(parent_gz, scope, .ref, for_full.ast.cond_expr);
const len = try parent_gz.addUnNode(.indexable_ptr_len, array_ptr, for_full.ast.cond_expr);
const index_ptr = blk: {
const index_ptr = try parent_gz.addUnNode(.alloc, .usize_type, node);
// initialize to zero
_ = try parent_gz.addBin(.store, index_ptr, .zero_usize);
break :blk index_ptr;
};
const loop_tag: zir.Inst.Tag = if (is_inline) .block_inline else .loop;
const loop_block = try parent_gz.addBlock(loop_tag, node);
try parent_gz.instructions.append(mod.gpa, loop_block);
var loop_scope: GenZir = .{
.parent = scope,
.astgen = parent_gz.astgen,
.force_comptime = parent_gz.force_comptime,
.instructions = .{},
};
loop_scope.setBreakResultLoc(rl);
defer loop_scope.instructions.deinit(mod.gpa);
var cond_scope: GenZir = .{
.parent = &loop_scope.base,
.astgen = parent_gz.astgen,
.force_comptime = loop_scope.force_comptime,
.instructions = .{},
};
defer cond_scope.instructions.deinit(mod.gpa);
// Check the loop condition: index < len.
const index = try cond_scope.addUnNode(.load, index_ptr, for_full.ast.cond_expr);
const cond = try cond_scope.addPlNode(.cmp_lt, for_full.ast.cond_expr, zir.Inst.Bin{
.lhs = index,
.rhs = len,
});
const condbr_tag: zir.Inst.Tag = if (is_inline) .condbr_inline else .condbr;
const condbr = try cond_scope.addCondBr(condbr_tag, node);
const block_tag: zir.Inst.Tag = if (is_inline) .block_inline else .block;
const cond_block = try loop_scope.addBlock(block_tag, node);
try loop_scope.instructions.append(mod.gpa, cond_block);
try cond_scope.setBlockBody(cond_block);
// Increment the index variable.
const index_2 = try loop_scope.addUnNode(.load, index_ptr, for_full.ast.cond_expr);
const index_plus_one = try loop_scope.addPlNode(.add, node, zir.Inst.Bin{
.lhs = index_2,
.rhs = .one_usize,
});
_ = try loop_scope.addBin(.store, index_ptr, index_plus_one);
const repeat_tag: zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat;
_ = try loop_scope.addNode(repeat_tag, node);
try loop_scope.setBlockBody(loop_block);
loop_scope.break_block = loop_block;
loop_scope.continue_block = cond_block;
if (for_full.label_token) |label_token| {
loop_scope.label = @as(?GenZir.Label, GenZir.Label{
.token = label_token,
.block_inst = loop_block,
});
}
var then_scope: GenZir = .{
.parent = &cond_scope.base,
.astgen = parent_gz.astgen,
.force_comptime = cond_scope.force_comptime,
.instructions = .{},
};
defer then_scope.instructions.deinit(mod.gpa);
var index_scope: Scope.LocalPtr = undefined;
const then_sub_scope = blk: {
const payload_token = for_full.payload_token.?;
const ident = if (token_tags[payload_token] == .asterisk)
payload_token + 1
else
payload_token;
const is_ptr = ident != payload_token;
const value_name = tree.tokenSlice(ident);
if (!mem.eql(u8, value_name, "_")) {
return mod.failNode(&then_scope.base, ident, "TODO implement for loop value payload", .{});
} else if (is_ptr) {
return mod.failTok(&then_scope.base, payload_token, "pointer modifier invalid on discard", .{});
}
const index_token = if (token_tags[ident + 1] == .comma)
ident + 2
else
break :blk &then_scope.base;
if (mem.eql(u8, tree.tokenSlice(index_token), "_")) {
return mod.failTok(&then_scope.base, index_token, "discard of index capture; omit it instead", .{});
}
const index_name = try mod.identifierTokenString(&then_scope.base, index_token);
index_scope = .{
.parent = &then_scope.base,
.gen_zir = &then_scope,
.name = index_name,
.ptr = index_ptr,
.src = parent_gz.tokSrcLoc(index_token),
};
break :blk &index_scope.base;
};
loop_scope.break_count += 1;
const then_result = try expr(&then_scope, then_sub_scope, loop_scope.break_result_loc, for_full.ast.then_expr);
var else_scope: GenZir = .{
.parent = &cond_scope.base,
.astgen = parent_gz.astgen,
.force_comptime = cond_scope.force_comptime,
.instructions = .{},
};
defer else_scope.instructions.deinit(mod.gpa);
const else_node = for_full.ast.else_expr;
const else_info: struct {
src: ast.Node.Index,
result: zir.Inst.Ref,
} = if (else_node != 0) blk: {
loop_scope.break_count += 1;
const sub_scope = &else_scope.base;
break :blk .{
.src = else_node,
.result = try expr(&else_scope, sub_scope, loop_scope.break_result_loc, else_node),
};
} else .{
.src = for_full.ast.then_expr,
.result = .none,
};
if (loop_scope.label) |some| {
if (!some.used) {
return mod.failTok(scope, some.token, "unused for loop label", .{});
}
}
const break_tag: zir.Inst.Tag = if (is_inline) .break_inline else .@"break";
return finishThenElseBlock(
parent_gz,
scope,
rl,
node,
&loop_scope,
&then_scope,
&else_scope,
condbr,
cond,
for_full.ast.then_expr,
else_info.src,
then_result,
else_info.result,
loop_block,
cond_block,
break_tag,
);
}
fn getRangeNode(
node_tags: []const ast.Node.Tag,
node_datas: []const ast.Node.Data,
node: ast.Node.Index,
) ?ast.Node.Index {
switch (node_tags[node]) {
.switch_range => return node,
.grouped_expression => unreachable,
else => return null,
}
}
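/// Identifies which switch prong (and, for multi-prongs, which item or range
/// within it) an error refers to, so that a precise source location can be
/// recovered lazily via `resolve`.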
pub const SwitchProngSrc = union(enum) {
scalar: u32,
multi: Multi,
range: Multi,
pub const Multi = struct {
prong: u32,
item: u32,
};
pub const RangeExpand = enum { none, first, last };
/// This function is intended to be called only when it is certain that we need
/// the LazySrcLoc in order to emit a compile error.
pub fn resolve(
prong_src: SwitchProngSrc,
decl: *Decl,
switch_node_offset: i32,
range_expand: RangeExpand,
) LazySrcLoc {
@setCold(true);
const switch_node = decl.relativeToNodeIndex(switch_node_offset);
const tree = decl.container.file_scope.base.tree();
const main_tokens = tree.nodes.items(.main_token);
const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
const extra = tree.extraData(node_datas[switch_node].rhs, ast.Node.SubRange);
const case_nodes = tree.extra_data[extra.start..extra.end];
var multi_i: u32 = 0;
var scalar_i: u32 = 0;
for (case_nodes) |case_node| {
const case = switch (node_tags[case_node]) {
.switch_case_one => tree.switchCaseOne(case_node),
.switch_case => tree.switchCase(case_node),
else => unreachable,
};
if (case.ast.values.len == 0)
continue;
if (case.ast.values.len == 1 and
node_tags[case.ast.values[0]] == .identifier and
mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_"))
{
continue;
}
const is_multi = case.ast.values.len != 1 or
getRangeNode(node_tags, node_datas, case.ast.values[0]) != null;
switch (prong_src) {
.scalar => |i| if (!is_multi and i == scalar_i) return LazySrcLoc{
.node_offset = decl.nodeIndexToRelative(case.ast.values[0]),
},
.multi => |s| if (is_multi and s.prong == multi_i) {
var item_i: u32 = 0;
for (case.ast.values) |item_node| {
if (getRangeNode(node_tags, node_datas, item_node) != null)
continue;
if (item_i == s.item) return LazySrcLoc{
.node_offset = decl.nodeIndexToRelative(item_node),
};
item_i += 1;
} else unreachable;
},
.range => |s| if (is_multi and s.prong == multi_i) {
var range_i: u32 = 0;
for (case.ast.values) |item_node| {
const range = getRangeNode(node_tags, node_datas, item_node) orelse continue;
if (range_i == s.item) switch (range_expand) {
.none => return LazySrcLoc{
.node_offset = decl.nodeIndexToRelative(item_node),
},
.first => return LazySrcLoc{
.node_offset = decl.nodeIndexToRelative(node_datas[range].lhs),
},
.last => return LazySrcLoc{
.node_offset = decl.nodeIndexToRelative(node_datas[range].rhs),
},
};
range_i += 1;
} else unreachable;
},
}
if (is_multi) {
multi_i += 1;
} else {
scalar_i += 1;
}
} else unreachable;
}
};
fn switchExpr(
parent_gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
switch_node: ast.Node.Index,
) InnerError!zir.Inst.Ref {
const astgen = parent_gz.astgen;
const mod = astgen.mod;
const gpa = mod.gpa;
const tree = parent_gz.tree();
const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
const main_tokens = tree.nodes.items(.main_token);
const token_tags = tree.tokens.items(.tag);
const operand_node = node_datas[switch_node].lhs;
const extra = tree.extraData(node_datas[switch_node].rhs, ast.Node.SubRange);
const case_nodes = tree.extra_data[extra.start..extra.end];
// We perform two passes over the AST. This first pass is to collect information
// for the following variables, make note of the special prong AST node index,
// and bail out with a compile error if there are multiple special prongs present.
var any_payload_is_ref = false;
var scalar_cases_len: u32 = 0;
var multi_cases_len: u32 = 0;
var special_prong: zir.SpecialProng = .none;
var special_node: ast.Node.Index = 0;
var else_src: ?LazySrcLoc = null;
var underscore_src: ?LazySrcLoc = null;
for (case_nodes) |case_node| {
const case = switch (node_tags[case_node]) {
.switch_case_one => tree.switchCaseOne(case_node),
.switch_case => tree.switchCase(case_node),
else => unreachable,
};
if (case.payload_token) |payload_token| {
if (token_tags[payload_token] == .asterisk) {
any_payload_is_ref = true;
}
}
// Check for else/`_` prong.
if (case.ast.values.len == 0) {
const case_src = parent_gz.tokSrcLoc(case.ast.arrow_token - 1);
if (else_src) |src| {
const msg = msg: {
const msg = try mod.errMsg(
scope,
case_src,
"multiple else prongs in switch expression",
.{},
);
errdefer msg.destroy(gpa);
try mod.errNote(scope, src, msg, "previous else prong is here", .{});
break :msg msg;
};
return mod.failWithOwnedErrorMsg(scope, msg);
} else if (underscore_src) |some_underscore| {
const msg = msg: {
const msg = try mod.errMsg(
scope,
parent_gz.nodeSrcLoc(switch_node),
"else and '_' prong in switch expression",
.{},
);
errdefer msg.destroy(gpa);
try mod.errNote(scope, case_src, msg, "else prong is here", .{});
try mod.errNote(scope, some_underscore, msg, "'_' prong is here", .{});
break :msg msg;
};
return mod.failWithOwnedErrorMsg(scope, msg);
}
special_node = case_node;
special_prong = .@"else";
else_src = case_src;
continue;
} else if (case.ast.values.len == 1 and
node_tags[case.ast.values[0]] == .identifier and
mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_"))
{
const case_src = parent_gz.tokSrcLoc(case.ast.arrow_token - 1);
if (underscore_src) |src| {
const msg = msg: {
const msg = try mod.errMsg(
scope,
case_src,
"multiple '_' prongs in switch expression",
.{},
);
errdefer msg.destroy(gpa);
try mod.errNote(scope, src, msg, "previous '_' prong is here", .{});
break :msg msg;
};
return mod.failWithOwnedErrorMsg(scope, msg);
} else if (else_src) |some_else| {
const msg = msg: {
const msg = try mod.errMsg(
scope,
parent_gz.nodeSrcLoc(switch_node),
"else and '_' prong in switch expression",
.{},
);
errdefer msg.destroy(gpa);
try mod.errNote(scope, some_else, msg, "else prong is here", .{});
try mod.errNote(scope, case_src, msg, "'_' prong is here", .{});
break :msg msg;
};
return mod.failWithOwnedErrorMsg(scope, msg);
}
special_node = case_node;
special_prong = .under;
underscore_src = case_src;
continue;
}
if (case.ast.values.len == 1 and
getRangeNode(node_tags, node_datas, case.ast.values[0]) == null)
{
scalar_cases_len += 1;
} else {
multi_cases_len += 1;
}
}
const operand_rl: ResultLoc = if (any_payload_is_ref) .ref else .none;
const operand = try expr(parent_gz, scope, operand_rl, operand_node);
// We need the type of the operand to use as the result location for all the prong items.
const typeof_tag: zir.Inst.Tag = if (any_payload_is_ref) .typeof_elem else .typeof;
const operand_ty_inst = try parent_gz.addUnNode(typeof_tag, operand, operand_node);
const item_rl: ResultLoc = .{ .ty = operand_ty_inst };
// Contains the data that goes into the `extra` array for the SwitchBlock/SwitchBlockMulti.
// This is the header, the optional else prong body, and all the scalar cases.
// At the end we will memcpy this into place.
var scalar_cases_payload = ArrayListUnmanaged(u32){};
defer scalar_cases_payload.deinit(gpa);
// Same deal, but this is only the `extra` data for the multi cases.
var multi_cases_payload = ArrayListUnmanaged(u32){};
defer multi_cases_payload.deinit(gpa);
var block_scope: GenZir = .{
.parent = scope,
.astgen = astgen,
.force_comptime = parent_gz.force_comptime,
.instructions = .{},
};
block_scope.setBreakResultLoc(rl);
defer block_scope.instructions.deinit(gpa);
// This gets added to the parent block later, after the item expressions.
const switch_block = try parent_gz.addBlock(undefined, switch_node);
// We re-use this same scope for all cases, including the special prong, if any.
var case_scope: GenZir = .{
.parent = &block_scope.base,
.astgen = astgen,
.force_comptime = parent_gz.force_comptime,
.instructions = .{},
};
defer case_scope.instructions.deinit(gpa);
// Do the else/`_` first because it goes first in the payload.
var capture_val_scope: Scope.LocalVal = undefined;
if (special_node != 0) {
const case = switch (node_tags[special_node]) {
.switch_case_one => tree.switchCaseOne(special_node),
.switch_case => tree.switchCase(special_node),
else => unreachable,
};
const sub_scope = blk: {
const payload_token = case.payload_token orelse break :blk &case_scope.base;
const ident = if (token_tags[payload_token] == .asterisk)
payload_token + 1
else
payload_token;
const is_ptr = ident != payload_token;
if (mem.eql(u8, tree.tokenSlice(ident), "_")) {
if (is_ptr) {
return mod.failTok(&case_scope.base, payload_token, "pointer modifier invalid on discard", .{});
}
break :blk &case_scope.base;
}
const capture_tag: zir.Inst.Tag = if (is_ptr)
.switch_capture_else_ref
else
.switch_capture_else;
const capture = try case_scope.add(.{
.tag = capture_tag,
.data = .{ .switch_capture = .{
.switch_inst = switch_block,
.prong_index = undefined,
} },
});
const capture_name = try mod.identifierTokenString(&parent_gz.base, payload_token);
capture_val_scope = .{
.parent = &case_scope.base,
.gen_zir = &case_scope,
.name = capture_name,
.inst = capture,
.src = parent_gz.tokSrcLoc(payload_token),
};
break :blk &capture_val_scope.base;
};
const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_loc, case.ast.target_expr);
if (!astgen.refIsNoReturn(case_result)) {
block_scope.break_count += 1;
_ = try case_scope.addBreak(.@"break", switch_block, case_result);
}
// Documentation for this: `zir.Inst.SwitchBlock` and `zir.Inst.SwitchBlockMulti`.
try scalar_cases_payload.ensureCapacity(gpa, scalar_cases_payload.items.len +
3 + // operand, scalar_cases_len, else body len
@boolToInt(multi_cases_len != 0) +
case_scope.instructions.items.len);
scalar_cases_payload.appendAssumeCapacity(@enumToInt(operand));
scalar_cases_payload.appendAssumeCapacity(scalar_cases_len);
if (multi_cases_len != 0) {
scalar_cases_payload.appendAssumeCapacity(multi_cases_len);
}
scalar_cases_payload.appendAssumeCapacity(@intCast(u32, case_scope.instructions.items.len));
scalar_cases_payload.appendSliceAssumeCapacity(case_scope.instructions.items);
} else {
// Documentation for this: `zir.Inst.SwitchBlock` and `zir.Inst.SwitchBlockMulti`.
try scalar_cases_payload.ensureCapacity(gpa, scalar_cases_payload.items.len +
2 + // operand, scalar_cases_len
@boolToInt(multi_cases_len != 0));
scalar_cases_payload.appendAssumeCapacity(@enumToInt(operand));
scalar_cases_payload.appendAssumeCapacity(scalar_cases_len);
if (multi_cases_len != 0) {
scalar_cases_payload.appendAssumeCapacity(multi_cases_len);
}
}
// In this pass we generate all the item and prong expressions except the special case.
var multi_case_index: u32 = 0;
var scalar_case_index: u32 = 0;
for (case_nodes) |case_node| {
if (case_node == special_node)
continue;
const case = switch (node_tags[case_node]) {
.switch_case_one => tree.switchCaseOne(case_node),
.switch_case => tree.switchCase(case_node),
else => unreachable,
};
// Reset the scope.
case_scope.instructions.shrinkRetainingCapacity(0);
const is_multi_case = case.ast.values.len != 1 or
getRangeNode(node_tags, node_datas, case.ast.values[0]) != null;
const sub_scope = blk: {
const payload_token = case.payload_token orelse break :blk &case_scope.base;
const ident = if (token_tags[payload_token] == .asterisk)
payload_token + 1
else
payload_token;
const is_ptr = ident != payload_token;
if (mem.eql(u8, tree.tokenSlice(ident), "_")) {
if (is_ptr) {
return mod.failTok(&case_scope.base, payload_token, "pointer modifier invalid on discard", .{});
}
break :blk &case_scope.base;
}
const is_multi_case_bits: u2 = @boolToInt(is_multi_case);
const is_ptr_bits: u2 = @boolToInt(is_ptr);
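// Bit 1 of the key is set for multi-prong captures, bit 0 for by-ref captures.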
const capture_tag: zir.Inst.Tag = switch ((is_multi_case_bits << 1) | is_ptr_bits) {
0b00 => .switch_capture,
0b01 => .switch_capture_ref,
0b10 => .switch_capture_multi,
0b11 => .switch_capture_multi_ref,
};
const capture_index = if (is_multi_case) ci: {
multi_case_index += 1;
break :ci multi_case_index - 1;
} else ci: {
scalar_case_index += 1;
break :ci scalar_case_index - 1;
};
const capture = try case_scope.add(.{
.tag = capture_tag,
.data = .{ .switch_capture = .{
.switch_inst = switch_block,
.prong_index = capture_index,
} },
});
const capture_name = try mod.identifierTokenString(&parent_gz.base, payload_token);
capture_val_scope = .{
.parent = &case_scope.base,
.gen_zir = &case_scope,
.name = capture_name,
.inst = capture,
.src = parent_gz.tokSrcLoc(payload_token),
};
break :blk &capture_val_scope.base;
};
if (is_multi_case) {
// items_len, ranges_len, body_len
const header_index = multi_cases_payload.items.len;
try multi_cases_payload.resize(gpa, multi_cases_payload.items.len + 3);
// items
var items_len: u32 = 0;
for (case.ast.values) |item_node| {
if (getRangeNode(node_tags, node_datas, item_node) != null) continue;
items_len += 1;
const item_inst = try comptimeExpr(parent_gz, scope, item_rl, item_node);
try multi_cases_payload.append(gpa, @enumToInt(item_inst));
}
// ranges
var ranges_len: u32 = 0;
for (case.ast.values) |item_node| {
const range = getRangeNode(node_tags, node_datas, item_node) orelse continue;
ranges_len += 1;
const first = try comptimeExpr(parent_gz, scope, item_rl, node_datas[range].lhs);
const last = try comptimeExpr(parent_gz, scope, item_rl, node_datas[range].rhs);
try multi_cases_payload.appendSlice(gpa, &[_]u32{
@enumToInt(first), @enumToInt(last),
});
}
const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_loc, case.ast.target_expr);
if (!astgen.refIsNoReturn(case_result)) {
block_scope.break_count += 1;
_ = try case_scope.addBreak(.@"break", switch_block, case_result);
}
multi_cases_payload.items[header_index + 0] = items_len;
multi_cases_payload.items[header_index + 1] = ranges_len;
multi_cases_payload.items[header_index + 2] = @intCast(u32, case_scope.instructions.items.len);
try multi_cases_payload.appendSlice(gpa, case_scope.instructions.items);
} else {
const item_node = case.ast.values[0];
const item_inst = try comptimeExpr(parent_gz, scope, item_rl, item_node);
const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_loc, case.ast.target_expr);
if (!astgen.refIsNoReturn(case_result)) {
block_scope.break_count += 1;
_ = try case_scope.addBreak(.@"break", switch_block, case_result);
}
try scalar_cases_payload.ensureCapacity(gpa, scalar_cases_payload.items.len +
2 + case_scope.instructions.items.len);
scalar_cases_payload.appendAssumeCapacity(@enumToInt(item_inst));
scalar_cases_payload.appendAssumeCapacity(@intCast(u32, case_scope.instructions.items.len));
scalar_cases_payload.appendSliceAssumeCapacity(case_scope.instructions.items);
}
}
// Now that the item expressions are generated we can add this.
try parent_gz.instructions.append(gpa, switch_block);
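// Select the switch_block tag variant from a 4-bit key: bit 3 = operand
// captured by reference, bits 2-1 = special prong (0b00 none, 0b01 else,
// 0b10 '_'), bit 0 = any multi cases. The binary literals below are grouped
// as 0b<ref>_<special>_<multi>.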
const ref_bit: u4 = @boolToInt(any_payload_is_ref);
const multi_bit: u4 = @boolToInt(multi_cases_len != 0);
const special_prong_bits: u4 = @enumToInt(special_prong);
comptime {
assert(@enumToInt(zir.SpecialProng.none) == 0b00);
assert(@enumToInt(zir.SpecialProng.@"else") == 0b01);
assert(@enumToInt(zir.SpecialProng.under) == 0b10);
}
const zir_tags = astgen.instructions.items(.tag);
zir_tags[switch_block] = switch ((ref_bit << 3) | (special_prong_bits << 1) | multi_bit) {
0b0_00_0 => .switch_block,
0b0_00_1 => .switch_block_multi,
0b0_01_0 => .switch_block_else,
0b0_01_1 => .switch_block_else_multi,
0b0_10_0 => .switch_block_under,
0b0_10_1 => .switch_block_under_multi,
0b1_00_0 => .switch_block_ref,
0b1_00_1 => .switch_block_ref_multi,
0b1_01_0 => .switch_block_ref_else,
0b1_01_1 => .switch_block_ref_else_multi,
0b1_10_0 => .switch_block_ref_under,
0b1_10_1 => .switch_block_ref_under_multi,
else => unreachable,
};
const payload_index = astgen.extra.items.len;
const zir_datas = astgen.instructions.items(.data);
zir_datas[switch_block].pl_node.payload_index = @intCast(u32, payload_index);
try astgen.extra.ensureCapacity(gpa, astgen.extra.items.len +
scalar_cases_payload.items.len + multi_cases_payload.items.len);
const strat = rl.strategy(&block_scope);
switch (strat.tag) {
.break_operand => {
// Switch expressions return `true` for `nodeMayNeedMemoryLocation`, so
// this is always true.
assert(strat.elide_store_to_block_ptr_instructions);
// There will necessarily be a store_to_block_ptr for
// all prongs, except for prongs that ended with a noreturn instruction.
// Elide all the `store_to_block_ptr` instructions.
// The break instructions need to have their operands coerced if the
// switch's result location is a `ty`. In this case we overwrite the
// `store_to_block_ptr` instruction with an `as` instruction and repurpose
// it as the break operand.
var extra_index: usize = 0;
extra_index += 2;
extra_index += @boolToInt(multi_cases_len != 0);
if (special_prong != .none) special_prong: {
const body_len_index = extra_index;
const body_len = scalar_cases_payload.items[extra_index];
extra_index += 1;
if (body_len < 2) {
extra_index += body_len;
astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[0..extra_index]);
break :special_prong;
}
extra_index += body_len - 2;
const store_inst = scalar_cases_payload.items[extra_index];
if (zir_tags[store_inst] != .store_to_block_ptr) {
extra_index += 2;
astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[0..extra_index]);
break :special_prong;
}
assert(zir_datas[store_inst].bin.lhs == block_scope.rl_ptr);
if (block_scope.rl_ty_inst != .none) {
extra_index += 1;
const break_inst = scalar_cases_payload.items[extra_index];
extra_index += 1;
astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[0..extra_index]);
zir_tags[store_inst] = .as;
zir_datas[store_inst].bin = .{
.lhs = block_scope.rl_ty_inst,
.rhs = zir_datas[break_inst].@"break".operand,
};
zir_datas[break_inst].@"break".operand = astgen.indexToRef(store_inst);
} else {
scalar_cases_payload.items[body_len_index] -= 1;
astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[0..extra_index]);
extra_index += 1;
astgen.extra.appendAssumeCapacity(scalar_cases_payload.items[extra_index]);
extra_index += 1;
}
} else {
astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[0..extra_index]);
}
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const start_index = extra_index;
extra_index += 1;
const body_len_index = extra_index;
const body_len = scalar_cases_payload.items[extra_index];
extra_index += 1;
if (body_len < 2) {
extra_index += body_len;
astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[start_index..extra_index]);
continue;
}
extra_index += body_len - 2;
const store_inst = scalar_cases_payload.items[extra_index];
if (zir_tags[store_inst] != .store_to_block_ptr) {
extra_index += 2;
astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[start_index..extra_index]);
continue;
}
if (block_scope.rl_ty_inst != .none) {
extra_index += 1;
const break_inst = scalar_cases_payload.items[extra_index];
extra_index += 1;
astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[start_index..extra_index]);
zir_tags[store_inst] = .as;
zir_datas[store_inst].bin = .{
.lhs = block_scope.rl_ty_inst,
.rhs = zir_datas[break_inst].@"break".operand,
};
zir_datas[break_inst].@"break".operand = astgen.indexToRef(store_inst);
} else {
assert(zir_datas[store_inst].bin.lhs == block_scope.rl_ptr);
scalar_cases_payload.items[body_len_index] -= 1;
astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[start_index..extra_index]);
extra_index += 1;
astgen.extra.appendAssumeCapacity(scalar_cases_payload.items[extra_index]);
extra_index += 1;
}
}
extra_index = 0;
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const start_index = extra_index;
const items_len = multi_cases_payload.items[extra_index];
extra_index += 1;
const ranges_len = multi_cases_payload.items[extra_index];
extra_index += 1;
const body_len_index = extra_index;
const body_len = multi_cases_payload.items[extra_index];
extra_index += 1;
extra_index += items_len;
extra_index += 2 * ranges_len;
if (body_len < 2) {
extra_index += body_len;
astgen.extra.appendSliceAssumeCapacity(multi_cases_payload.items[start_index..extra_index]);
continue;
}
extra_index += body_len - 2;
const store_inst = multi_cases_payload.items[extra_index];
if (zir_tags[store_inst] != .store_to_block_ptr) {
extra_index += 2;
astgen.extra.appendSliceAssumeCapacity(multi_cases_payload.items[start_index..extra_index]);
continue;
}
if (block_scope.rl_ty_inst != .none) {
extra_index += 1;
const break_inst = multi_cases_payload.items[extra_index];
extra_index += 1;
astgen.extra.appendSliceAssumeCapacity(multi_cases_payload.items[start_index..extra_index]);
zir_tags[store_inst] = .as;
zir_datas[store_inst].bin = .{
.lhs = block_scope.rl_ty_inst,
.rhs = zir_datas[break_inst].@"break".operand,
};
zir_datas[break_inst].@"break".operand = astgen.indexToRef(store_inst);
} else {
assert(zir_datas[store_inst].bin.lhs == block_scope.rl_ptr);
multi_cases_payload.items[body_len_index] -= 1;
astgen.extra.appendSliceAssumeCapacity(multi_cases_payload.items[start_index..extra_index]);
extra_index += 1;
astgen.extra.appendAssumeCapacity(multi_cases_payload.items[extra_index]);
extra_index += 1;
}
}
const block_ref = astgen.indexToRef(switch_block);
switch (rl) {
.ref => return block_ref,
else => return rvalue(parent_gz, scope, rl, block_ref, switch_node),
}
},
.break_void => {
assert(!strat.elide_store_to_block_ptr_instructions);
astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items);
astgen.extra.appendSliceAssumeCapacity(multi_cases_payload.items);
// Modify all the terminating instruction tags to become `break` variants.
var extra_index: usize = payload_index;
extra_index += 2;
extra_index += @boolToInt(multi_cases_len != 0);
if (special_prong != .none) {
const body_len = astgen.extra.items[extra_index];
extra_index += 1;
const body = astgen.extra.items[extra_index..][0..body_len];
extra_index += body_len;
const last = body[body.len - 1];
if (zir_tags[last] == .@"break" and
zir_datas[last].@"break".block_inst == switch_block)
{
zir_datas[last].@"break".operand = .void_value;
}
}
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
extra_index += 1;
const body_len = astgen.extra.items[extra_index];
extra_index += 1;
const body = astgen.extra.items[extra_index..][0..body_len];
extra_index += body_len;
const last = body[body.len - 1];
if (zir_tags[last] == .@"break" and
zir_datas[last].@"break".block_inst == switch_block)
{
zir_datas[last].@"break".operand = .void_value;
}
}
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = astgen.extra.items[extra_index];
extra_index += 1;
const ranges_len = astgen.extra.items[extra_index];
extra_index += 1;
const body_len = astgen.extra.items[extra_index];
extra_index += 1;
extra_index += items_len;
extra_index += 2 * ranges_len;
const body = astgen.extra.items[extra_index..][0..body_len];
extra_index += body_len;
const last = body[body.len - 1];
if (zir_tags[last] == .@"break" and
zir_datas[last].@"break".block_inst == switch_block)
{
zir_datas[last].@"break".operand = .void_value;
}
}
return astgen.indexToRef(switch_block);
},
}
}
fn ret(gz: *GenZir, scope: *Scope, node: ast.Node.Index) InnerError!zir.Inst.Ref {
const tree = gz.tree();
const node_datas = tree.nodes.items(.data);
const operand_node = node_datas[node].lhs;
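// If the operand may need a memory location, lower it directly into the
// return pointer; otherwise coerce it to the function's return type.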
const operand: zir.Inst.Ref = if (operand_node != 0) operand: {
const rl: ResultLoc = if (nodeMayNeedMemoryLocation(tree, operand_node)) .{
.ptr = try gz.addNode(.ret_ptr, node),
} else .{
.ty = try gz.addNode(.ret_type, node),
};
break :operand try expr(gz, scope, rl, operand_node);
} else .void_value;
_ = try gz.addUnNode(.ret_node, operand, node);
return zir.Inst.Ref.unreachable_value;
}
fn identifier(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
ident: ast.Node.Index,
) InnerError!zir.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = gz.astgen.mod;
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const ident_token = main_tokens[ident];
const ident_name = try mod.identifierTokenString(scope, ident_token);
if (mem.eql(u8, ident_name, "_")) {
return mod.failNode(scope, ident, "TODO implement '_' identifier", .{});
}
if (simple_types.get(ident_name)) |zir_const_ref| {
return rvalue(gz, scope, rl, zir_const_ref, ident);
}
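// Not a simple primitive; check for an arbitrary bit-width integer type
// such as `u7` or `i128`.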
if (ident_name.len >= 2) integer: {
const first_c = ident_name[0];
if (first_c == 'i' or first_c == 'u') {
const signedness: std.builtin.Signedness = if (first_c == 'i') .signed else .unsigned;
const bit_count = std.fmt.parseInt(u16, ident_name[1..], 10) catch |err| switch (err) {
error.Overflow => return mod.failNode(
scope,
ident,
"primitive integer type '{s}' exceeds maximum bit width of 65535",
.{ident_name},
),
error.InvalidCharacter => break :integer,
};
const result = try gz.add(.{
.tag = .int_type,
.data = .{ .int_type = .{
.src_node = gz.astgen.decl.nodeIndexToRelative(ident),
.signedness = signedness,
.bit_count = bit_count,
} },
});
return rvalue(gz, scope, rl, result, ident);
}
}
// Local variables, including function parameters.
{
var s = scope;
while (true) switch (s.tag) {
.local_val => {
const local_val = s.cast(Scope.LocalVal).?;
if (mem.eql(u8, local_val.name, ident_name)) {
return rvalue(gz, scope, rl, local_val.inst, ident);
}
s = local_val.parent;
},
.local_ptr => {
const local_ptr = s.cast(Scope.LocalPtr).?;
if (mem.eql(u8, local_ptr.name, ident_name)) {
if (rl == .ref) return local_ptr.ptr;
const loaded = try gz.addUnNode(.load, local_ptr.ptr, ident);
return rvalue(gz, scope, rl, loaded, ident);
}
s = local_ptr.parent;
},
.gen_zir => s = s.cast(GenZir).?.parent,
else => break,
};
}
const gop = try gz.astgen.decl_map.getOrPut(mod.gpa, ident_name);
if (!gop.found_existing) {
const decl = mod.lookupDeclName(scope, ident_name) orelse
return mod.failNode(scope, ident, "use of undeclared identifier '{s}'", .{ident_name});
try gz.astgen.decls.append(mod.gpa, decl);
}
const decl_index = @intCast(u32, gop.index);
switch (rl) {
.ref => return gz.addDecl(.decl_ref, decl_index, ident),
else => return rvalue(gz, scope, rl, try gz.addDecl(.decl_val, decl_index, ident), ident),
}
}
fn stringLiteral(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
) InnerError!zir.Inst.Ref {
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const string_bytes = &gz.astgen.string_bytes;
const str_index = string_bytes.items.len;
const str_lit_token = main_tokens[node];
const token_bytes = tree.tokenSlice(str_lit_token);
try gz.astgen.mod.parseStrLit(scope, str_lit_token, string_bytes, token_bytes, 0);
const str_len = string_bytes.items.len - str_index;
const result = try gz.add(.{
.tag = .str,
.data = .{ .str = .{
.start = @intCast(u32, str_index),
.len = @intCast(u32, str_len),
} },
});
return rvalue(gz, scope, rl, result, node);
}
fn multilineStringLiteral(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
) InnerError!zir.Inst.Ref {
const tree = gz.tree();
const node_datas = tree.nodes.items(.data);
const main_tokens = tree.nodes.items(.main_token);
const start = node_datas[node].lhs;
const end = node_datas[node].rhs;
const gpa = gz.astgen.mod.gpa;
const string_bytes = &gz.astgen.string_bytes;
const str_index = string_bytes.items.len;
// First line: do not append a newline.
var tok_i = start;
{
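// Each line token starts with `\\` and ends with a newline; slice both off.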
const slice = tree.tokenSlice(tok_i);
const line_bytes = slice[2 .. slice.len - 1];
try string_bytes.appendSlice(gpa, line_bytes);
tok_i += 1;
}
// Following lines: prepend a newline before each line.
while (tok_i <= end) : (tok_i += 1) {
const slice = tree.tokenSlice(tok_i);
const line_bytes = slice[2 .. slice.len - 1];
try string_bytes.ensureCapacity(gpa, string_bytes.items.len + line_bytes.len + 1);
string_bytes.appendAssumeCapacity('\n');
string_bytes.appendSliceAssumeCapacity(line_bytes);
}
const result = try gz.add(.{
.tag = .str,
.data = .{ .str = .{
.start = @intCast(u32, str_index),
.len = @intCast(u32, string_bytes.items.len - str_index),
} },
});
return rvalue(gz, scope, rl, result, node);
}
fn charLiteral(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) !zir.Inst.Ref {
const mod = gz.astgen.mod;
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const main_token = main_tokens[node];
const slice = tree.tokenSlice(main_token);
var bad_index: usize = undefined;
const value = std.zig.parseCharLiteral(slice, &bad_index) catch |err| switch (err) {
error.InvalidCharacter => {
const bad_byte = slice[bad_index];
const token_starts = tree.tokens.items(.start);
const src_off = @intCast(u32, token_starts[main_token] + bad_index);
return mod.failOff(scope, src_off, "invalid character: '{c}'\n", .{bad_byte});
},
};
const result = try gz.addInt(value);
return rvalue(gz, scope, rl, result, node);
}
fn integerLiteral(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
) InnerError!zir.Inst.Ref {
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const int_token = main_tokens[node];
const prefixed_bytes = tree.tokenSlice(int_token);
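// Radix 0 tells parseInt to honor 0b/0o/0x prefixes.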
if (std.fmt.parseInt(u64, prefixed_bytes, 0)) |small_int| {
const result: zir.Inst.Ref = switch (small_int) {
0 => .zero,
1 => .one,
else => try gz.addInt(small_int),
};
return rvalue(gz, scope, rl, result, node);
} else |err| {
return gz.astgen.mod.failNode(scope, node, "TODO implement int literals that don't fit in a u64", .{});
}
}
fn floatLiteral(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
) InnerError!zir.Inst.Ref {
const arena = gz.astgen.arena;
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const main_token = main_tokens[node];
const bytes = tree.tokenSlice(main_token);
if (bytes.len > 2 and bytes[1] == 'x') {
assert(bytes[0] == '0'); // validated by tokenizer
return gz.astgen.mod.failTok(scope, main_token, "TODO implement hex floats", .{});
}
const float_number = std.fmt.parseFloat(f128, bytes) catch |e| switch (e) {
error.InvalidCharacter => unreachable, // validated by tokenizer
};
const typed_value = try arena.create(TypedValue);
typed_value.* = .{
.ty = Type.initTag(.comptime_float),
.val = try Value.Tag.float_128.create(arena, float_number),
};
const result = try gz.addConst(typed_value);
return rvalue(gz, scope, rl, result, node);
}
fn asmExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
full: ast.full.Asm,
) InnerError!zir.Inst.Ref {
const mod = gz.astgen.mod;
const arena = gz.astgen.arena;
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const node_datas = tree.nodes.items(.data);
const asm_source = try expr(gz, scope, .{ .ty = .const_slice_u8_type }, full.ast.template);
if (full.outputs.len != 0) {
// When implementing this, be sure to add test coverage for the asm return type
// not resolving into a type (the node_offset_asm_ret_ty field of LazySrcLoc)
return mod.failTok(scope, full.ast.asm_token, "TODO implement asm with an output", .{});
}
const constraints = try arena.alloc(u32, full.inputs.len);
const args = try arena.alloc(zir.Inst.Ref, full.inputs.len);
for (full.inputs) |input, i| {
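// The constraint string literal is two tokens past the input's main token;
// append it to string_bytes NUL-terminated, recording its offset.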
const constraint_token = main_tokens[input] + 2;
const string_bytes = &gz.astgen.string_bytes;
constraints[i] = @intCast(u32, string_bytes.items.len);
const token_bytes = tree.tokenSlice(constraint_token);
try mod.parseStrLit(scope, constraint_token, string_bytes, token_bytes, 0);
try string_bytes.append(mod.gpa, 0);
args[i] = try expr(gz, scope, .{ .ty = .usize_type }, node_datas[input].lhs);
}
const tag: zir.Inst.Tag = if (full.volatile_token != null) .asm_volatile else .@"asm";
const result = try gz.addPlNode(tag, node, zir.Inst.Asm{
.asm_source = asm_source,
.return_type = .void_type,
.output = .none,
.args_len = @intCast(u32, full.inputs.len),
.clobbers_len = 0, // TODO implement asm clobbers
});
try gz.astgen.extra.ensureCapacity(mod.gpa, gz.astgen.extra.items.len +
args.len + constraints.len);
gz.astgen.appendRefsAssumeCapacity(args);
gz.astgen.extra.appendSliceAssumeCapacity(constraints);
return rvalue(gz, scope, rl, result, node);
}
fn as(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
lhs: ast.Node.Index,
rhs: ast.Node.Index,
) InnerError!zir.Inst.Ref {
const dest_type = try typeExpr(gz, scope, lhs);
switch (rl) {
.none, .discard, .ref, .ty => {
const result = try expr(gz, scope, .{ .ty = dest_type }, rhs);
return rvalue(gz, scope, rl, result, node);
},
.ptr => |result_ptr| {
return asRlPtr(gz, scope, rl, result_ptr, rhs, dest_type);
},
.block_ptr => |block_scope| {
return asRlPtr(gz, scope, rl, block_scope.rl_ptr, rhs, dest_type);
},
.inferred_ptr => |result_alloc| {
// TODO here we should be able to resolve the inference; we now have a type for the result.
return gz.astgen.mod.failNode(scope, node, "TODO implement @as with inferred-type result location pointer", .{});
},
}
}
fn asRlPtr(
parent_gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
result_ptr: zir.Inst.Ref,
operand_node: ast.Node.Index,
dest_type: zir.Inst.Ref,
) InnerError!zir.Inst.Ref {
// Detect whether this expr() call goes into rvalue() to store the result into the
// result location. If it does, elide the coerce_result_ptr instruction
// as well as the store instruction, instead passing the result as an rvalue.
const astgen = parent_gz.astgen;
var as_scope: GenZir = .{
.parent = scope,
.astgen = astgen,
.force_comptime = parent_gz.force_comptime,
.instructions = .{},
};
defer as_scope.instructions.deinit(astgen.mod.gpa);
as_scope.rl_ptr = try as_scope.addBin(.coerce_result_ptr, dest_type, result_ptr);
const result = try expr(&as_scope, &as_scope.base, .{ .block_ptr = &as_scope }, operand_node);
const parent_zir = &parent_gz.instructions;
if (as_scope.rvalue_rl_count == 1) {
// Busted! This expression didn't actually need a pointer.
const zir_tags = astgen.instructions.items(.tag);
const zir_datas = astgen.instructions.items(.data);
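// The two elided instructions are the coerce_result_ptr and the single
// store_to_block_ptr that wrote through it, hence the `- 2` below.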
const expected_len = parent_zir.items.len + as_scope.instructions.items.len - 2;
try parent_zir.ensureCapacity(astgen.mod.gpa, expected_len);
for (as_scope.instructions.items) |src_inst| {
if (astgen.indexToRef(src_inst) == as_scope.rl_ptr) continue;
if (zir_tags[src_inst] == .store_to_block_ptr) {
if (zir_datas[src_inst].bin.lhs == as_scope.rl_ptr) continue;
}
parent_zir.appendAssumeCapacity(src_inst);
}
assert(parent_zir.items.len == expected_len);
const casted_result = try parent_gz.addBin(.as, dest_type, result);
return rvalue(parent_gz, scope, rl, casted_result, operand_node);
} else {
try parent_zir.appendSlice(astgen.mod.gpa, as_scope.instructions.items);
return result;
}
}
fn bitCast(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
lhs: ast.Node.Index,
rhs: ast.Node.Index,
) InnerError!zir.Inst.Ref {
const mod = gz.astgen.mod;
const dest_type = try typeExpr(gz, scope, lhs);
switch (rl) {
.none, .discard, .ty => {
const operand = try expr(gz, scope, .none, rhs);
const result = try gz.addPlNode(.bitcast, node, zir.Inst.Bin{
.lhs = dest_type,
.rhs = operand,
});
return rvalue(gz, scope, rl, result, node);
},
.ref => unreachable, // `@bitCast` is not allowed as an r-value.
.ptr => |result_ptr| {
const casted_result_ptr = try gz.addUnNode(.bitcast_result_ptr, result_ptr, node);
return expr(gz, scope, .{ .ptr = casted_result_ptr }, rhs);
},
.block_ptr => |block_ptr| {
return mod.failNode(scope, node, "TODO implement @bitCast with result location inferred peer types", .{});
},
.inferred_ptr => |result_alloc| {
// TODO here we should be able to resolve the inference; we now have a type for the result.
return mod.failNode(scope, node, "TODO implement @bitCast with inferred-type result location pointer", .{});
},
}
}
fn typeOf(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
params: []const ast.Node.Index,
) InnerError!zir.Inst.Ref {
if (params.len < 1) {
return gz.astgen.mod.failNode(scope, node, "expected at least 1 argument, found 0", .{});
}
if (params.len == 1) {
const result = try gz.addUnNode(.typeof, try expr(gz, scope, .none, params[0]), node);
return rvalue(gz, scope, rl, result, node);
}
const arena = gz.astgen.arena;
var items = try arena.alloc(zir.Inst.Ref, params.len);
for (params) |param, param_i| {
items[param_i] = try expr(gz, scope, .none, param);
}
const result = try gz.addPlNode(.typeof_peer, node, zir.Inst.MultiOp{
.operands_len = @intCast(u32, params.len),
});
try gz.astgen.appendRefs(items);
return rvalue(gz, scope, rl, result, node);
}
fn builtinCall(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
params: []const ast.Node.Index,
) InnerError!zir.Inst.Ref {
const mod = gz.astgen.mod;
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const builtin_token = main_tokens[node];
const builtin_name = tree.tokenSlice(builtin_token);
// We handle the different builtins manually because they have different semantics depending
// on the function. For example, `@as` and others participate in result location semantics,
// and `@cImport` creates a special scope that collects a .c source code text buffer.
// Also, some builtins have a variable number of parameters.
const info = BuiltinFn.list.get(builtin_name) orelse {
return mod.failNode(scope, node, "invalid builtin function: '{s}'", .{
builtin_name,
});
};
if (info.param_count) |expected| {
if (expected != params.len) {
const s = if (expected == 1) "" else "s";
return mod.failNode(scope, node, "expected {d} parameter{s}, found {d}", .{
expected, s, params.len,
});
}
}
switch (info.tag) {
.ptr_to_int => {
const operand = try expr(gz, scope, .none, params[0]);
const result = try gz.addUnNode(.ptrtoint, operand, node);
return rvalue(gz, scope, rl, result, node);
},
.float_cast => {
const dest_type = try typeExpr(gz, scope, params[0]);
const rhs = try expr(gz, scope, .none, params[1]);
const result = try gz.addPlNode(.floatcast, node, zir.Inst.Bin{
.lhs = dest_type,
.rhs = rhs,
});
return rvalue(gz, scope, rl, result, node);
},
.int_cast => {
const dest_type = try typeExpr(gz, scope, params[0]);
const rhs = try expr(gz, scope, .none, params[1]);
const result = try gz.addPlNode(.intcast, node, zir.Inst.Bin{
.lhs = dest_type,
.rhs = rhs,
});
return rvalue(gz, scope, rl, result, node);
},
.breakpoint => {
const result = try gz.add(.{
.tag = .breakpoint,
.data = .{ .node = gz.astgen.decl.nodeIndexToRelative(node) },
});
return rvalue(gz, scope, rl, result, node);
},
.import => {
const target = try expr(gz, scope, .none, params[0]);
const result = try gz.addUnNode(.import, target, node);
return rvalue(gz, scope, rl, result, node);
},
.error_to_int => {
const target = try expr(gz, scope, .none, params[0]);
const result = try gz.addUnNode(.error_to_int, target, node);
return rvalue(gz, scope, rl, result, node);
},
.int_to_error => {
const target = try expr(gz, scope, .{ .ty = .u16_type }, params[0]);
const result = try gz.addUnNode(.int_to_error, target, node);
return rvalue(gz, scope, rl, result, node);
},
.compile_error => {
const target = try expr(gz, scope, .none, params[0]);
const result = try gz.addUnNode(.compile_error, target, node);
return rvalue(gz, scope, rl, result, node);
},
.set_eval_branch_quota => {
const quota = try expr(gz, scope, .{ .ty = .u32_type }, params[0]);
const result = try gz.addUnNode(.set_eval_branch_quota, quota, node);
return rvalue(gz, scope, rl, result, node);
},
.compile_log => {
const arg_refs = try mod.gpa.alloc(zir.Inst.Ref, params.len);
defer mod.gpa.free(arg_refs);
for (params) |param, i| arg_refs[i] = try expr(gz, scope, .none, param);
const result = try gz.addPlNode(.compile_log, node, zir.Inst.MultiOp{
.operands_len = @intCast(u32, params.len),
});
try gz.astgen.appendRefs(arg_refs);
return rvalue(gz, scope, rl, result, node);
},
.field => {
const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]);
if (rl == .ref) {
return try gz.addPlNode(.field_ptr_named, node, zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .ref, params[0]),
.field_name = field_name,
});
}
const result = try gz.addPlNode(.field_val_named, node, zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .none, params[0]),
.field_name = field_name,
});
return rvalue(gz, scope, rl, result, node);
},
.as => return as(gz, scope, rl, node, params[0], params[1]),
.bit_cast => return bitCast(gz, scope, rl, node, params[0], params[1]),
.TypeOf => return typeOf(gz, scope, rl, node, params),
.add_with_overflow,
.align_cast,
.align_of,
.atomic_load,
.atomic_rmw,
.atomic_store,
.bit_offset_of,
.bool_to_int,
.bit_size_of,
.mul_add,
.byte_swap,
.bit_reverse,
.byte_offset_of,
.call,
.c_define,
.c_import,
.c_include,
.clz,
.cmpxchg_strong,
.cmpxchg_weak,
.ctz,
.c_undef,
.div_exact,
.div_floor,
.div_trunc,
.embed_file,
.enum_to_int,
.error_name,
.error_return_trace,
.err_set_cast,
.@"export",
.fence,
.field_parent_ptr,
.float_to_int,
.has_decl,
.has_field,
.int_to_enum,
.int_to_float,
.int_to_ptr,
.memcpy,
.memset,
.wasm_memory_size,
.wasm_memory_grow,
.mod,
.mul_with_overflow,
.panic,
.pop_count,
.ptr_cast,
.rem,
.return_address,
.set_align_stack,
.set_cold,
.set_float_mode,
.set_runtime_safety,
.shl_exact,
.shl_with_overflow,
.shr_exact,
.shuffle,
.size_of,
.splat,
.reduce,
.src,
.sqrt,
.sin,
.cos,
.exp,
.exp2,
.log,
.log2,
.log10,
.fabs,
.floor,
.ceil,
.trunc,
.round,
.sub_with_overflow,
.tag_name,
.This,
.truncate,
.Type,
.type_info,
.type_name,
.union_init,
=> return mod.failNode(scope, node, "TODO: implement builtin function {s}", .{
builtin_name,
}),
.async_call,
.frame,
.Frame,
.frame_address,
.frame_size,
=> return mod.failNode(scope, node, "async and related features are not yet supported", .{}),
}
}
fn callExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
call: ast.full.Call,
) InnerError!zir.Inst.Ref {
const mod = gz.astgen.mod;
if (call.async_token) |async_token| {
return mod.failTok(scope, async_token, "async and related features are not yet supported", .{});
}
const lhs = try expr(gz, scope, .none, call.ast.fn_expr);
const args = try mod.gpa.alloc(zir.Inst.Ref, call.ast.params.len);
defer mod.gpa.free(args);
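// Evaluate each argument with a result type taken from the callee's
// corresponding parameter, communicated via a `param_type` instruction.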
for (call.ast.params) |param_node, i| {
const param_type = try gz.add(.{
.tag = .param_type,
.data = .{ .param_type = .{
.callee = lhs,
.param_index = @intCast(u32, i),
} },
});
args[i] = try expr(gz, scope, .{ .ty = param_type }, param_node);
}
const modifier: std.builtin.CallOptions.Modifier = switch (call.async_token != null) {
true => .async_kw,
false => .auto,
};
const result: zir.Inst.Ref = res: {
const tag: zir.Inst.Tag = switch (modifier) {
.auto => switch (args.len == 0) {
true => break :res try gz.addUnNode(.call_none, lhs, node),
false => .call,
},
.async_kw => return mod.failNode(scope, node, "async and related features are not yet supported", .{}),
.never_tail => unreachable,
.never_inline => unreachable,
.no_async => return mod.failNode(scope, node, "async and related features are not yet supported", .{}),
.always_tail => unreachable,
.always_inline => unreachable,
.compile_time => .call_compile_time,
};
break :res try gz.addCall(tag, lhs, args, node);
};
return rvalue(gz, scope, rl, result, node); // TODO function call with result location
}
pub const simple_types = std.ComptimeStringMap(zir.Inst.Ref, .{
.{ "u8", .u8_type },
.{ "i8", .i8_type },
.{ "u16", .u16_type },
.{ "i16", .i16_type },
.{ "u32", .u32_type },
.{ "i32", .i32_type },
.{ "u64", .u64_type },
.{ "i64", .i64_type },
.{ "usize", .usize_type },
.{ "isize", .isize_type },
.{ "c_short", .c_short_type },
.{ "c_ushort", .c_ushort_type },
.{ "c_int", .c_int_type },
.{ "c_uint", .c_uint_type },
.{ "c_long", .c_long_type },
.{ "c_ulong", .c_ulong_type },
.{ "c_longlong", .c_longlong_type },
.{ "c_ulonglong", .c_ulonglong_type },
.{ "c_longdouble", .c_longdouble_type },
.{ "f16", .f16_type },
.{ "f32", .f32_type },
.{ "f64", .f64_type },
.{ "f128", .f128_type },
.{ "c_void", .c_void_type },
.{ "bool", .bool_type },
.{ "void", .void_type },
.{ "type", .type_type },
.{ "anyerror", .anyerror_type },
.{ "comptime_int", .comptime_int_type },
.{ "comptime_float", .comptime_float_type },
.{ "noreturn", .noreturn_type },
.{ "null", .null_type },
.{ "undefined", .undefined_type },
.{ "undefined", .undef },
.{ "null", .null_value },
.{ "true", .bool_true },
.{ "false", .bool_false },
});
fn nodeMayNeedMemoryLocation(tree: *const ast.Tree, start_node: ast.Node.Index) bool {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const main_tokens = tree.nodes.items(.main_token);
const token_tags = tree.tokens.items(.tag);
var node = start_node;
while (true) {
switch (node_tags[node]) {
.root,
.@"usingnamespace",
.test_decl,
.switch_case,
.switch_case_one,
.container_field_init,
.container_field_align,
.container_field,
.asm_output,
.asm_input,
=> unreachable,
.@"return",
.@"break",
.@"continue",
.bit_not,
.bool_not,
.global_var_decl,
.local_var_decl,
.simple_var_decl,
.aligned_var_decl,
.@"defer",
.@"errdefer",
.address_of,
.optional_type,
.negation,
.negation_wrap,
.@"resume",
.array_type,
.array_type_sentinel,
.ptr_type_aligned,
.ptr_type_sentinel,
.ptr_type,
.ptr_type_bit_range,
.@"suspend",
.@"anytype",
.fn_proto_simple,
.fn_proto_multi,
.fn_proto_one,
.fn_proto,
.fn_decl,
.anyframe_type,
.anyframe_literal,
.integer_literal,
.float_literal,
.enum_literal,
.string_literal,
.multiline_string_literal,
.char_literal,
.true_literal,
.false_literal,
.null_literal,
.undefined_literal,
.unreachable_literal,
.identifier,
.error_set_decl,
.container_decl,
.container_decl_trailing,
.container_decl_two,
.container_decl_two_trailing,
.container_decl_arg,
.container_decl_arg_trailing,
.tagged_union,
.tagged_union_trailing,
.tagged_union_two,
.tagged_union_two_trailing,
.tagged_union_enum_tag,
.tagged_union_enum_tag_trailing,
.@"asm",
.asm_simple,
.add,
.add_wrap,
.array_cat,
.array_mult,
.assign,
.assign_bit_and,
.assign_bit_or,
.assign_bit_shift_left,
.assign_bit_shift_right,
.assign_bit_xor,
.assign_div,
.assign_sub,
.assign_sub_wrap,
.assign_mod,
.assign_add,
.assign_add_wrap,
.assign_mul,
.assign_mul_wrap,
.bang_equal,
.bit_and,
.bit_or,
.bit_shift_left,
.bit_shift_right,
.bit_xor,
.bool_and,
.bool_or,
.div,
.equal_equal,
.error_union,
.greater_or_equal,
.greater_than,
.less_or_equal,
.less_than,
.merge_error_sets,
.mod,
.mul,
.mul_wrap,
.switch_range,
.field_access,
.sub,
.sub_wrap,
.slice,
.slice_open,
.slice_sentinel,
.deref,
.array_access,
.error_value,
.while_simple, // This variant cannot have an else expression.
.while_cont, // This variant cannot have an else expression.
.for_simple, // This variant cannot have an else expression.
.if_simple, // This variant cannot have an else expression.
=> return false,
// Forward the question to the LHS sub-expression.
.grouped_expression,
.@"try",
.@"await",
.@"comptime",
.@"nosuspend",
.unwrap_optional,
=> node = node_datas[node].lhs,
// Forward the question to the RHS sub-expression.
.@"catch",
.@"orelse",
=> node = node_datas[node].rhs,
// True because these are exactly the expressions we need memory locations for.
.array_init_one,
.array_init_one_comma,
.array_init_dot_two,
.array_init_dot_two_comma,
.array_init_dot,
.array_init_dot_comma,
.array_init,
.array_init_comma,
.struct_init_one,
.struct_init_one_comma,
.struct_init_dot_two,
.struct_init_dot_two_comma,
.struct_init_dot,
.struct_init_dot_comma,
.struct_init,
.struct_init_comma,
=> return true,
// True because depending on comptime conditions, sub-expressions
// may be the kind that need memory locations.
.@"while", // This variant always has an else expression.
.@"if", // This variant always has an else expression.
.@"for", // This variant always has an else expression.
.@"switch",
.switch_comma,
.call_one,
.call_one_comma,
.async_call_one,
.async_call_one_comma,
.call,
.call_comma,
.async_call,
.async_call_comma,
=> return true,
.block_two,
.block_two_semicolon,
.block,
.block_semicolon,
=> {
const lbrace = main_tokens[node];
if (token_tags[lbrace - 1] == .colon) {
// Labeled blocks may need a memory location to forward
// to their break statements.
return true;
} else {
return false;
}
},
.builtin_call,
.builtin_call_comma,
.builtin_call_two,
.builtin_call_two_comma,
=> {
const builtin_token = main_tokens[node];
const builtin_name = tree.tokenSlice(builtin_token);
// If the builtin is an invalid name, we don't cause an error here; instead
// let it pass, and the error will be "invalid builtin function" later.
const builtin_info = BuiltinFn.list.get(builtin_name) orelse return false;
return builtin_info.needs_mem_loc;
},
}
}
}
/// Applies `rl` semantics to `inst`. Expressions which do not do their own handling of
/// result locations must call this function on their result.
/// As an example, if the `ResultLoc` is `ptr`, it will write the result to the pointer.
/// If the `ResultLoc` is `ty`, it will coerce the result to the type.
fn rvalue(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
result: zir.Inst.Ref,
src_node: ast.Node.Index,
) InnerError!zir.Inst.Ref {
switch (rl) {
.none => return result,
.discard => {
// Emit a compile error for discarding error values.
_ = try gz.addUnNode(.ensure_result_non_error, result, src_node);
return result;
},
.ref => {
// We need a pointer but we have a value.
const tree = gz.tree();
const src_token = tree.firstToken(src_node);
return gz.addUnTok(.ref, result, src_token);
},
.ty => |ty_inst| {
// Quickly eliminate some common, unnecessary type coercion.
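// Each (result type, result) pair is packed into one u64 (type tag in the
// upper 32 bits, result tag in the lower 32) so that a single switch can
// match all of the well-known combinations at once.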
const as_ty = @as(u64, @enumToInt(zir.Inst.Ref.type_type)) << 32;
const as_comptime_int = @as(u64, @enumToInt(zir.Inst.Ref.comptime_int_type)) << 32;
const as_bool = @as(u64, @enumToInt(zir.Inst.Ref.bool_type)) << 32;
const as_usize = @as(u64, @enumToInt(zir.Inst.Ref.usize_type)) << 32;
const as_void = @as(u64, @enumToInt(zir.Inst.Ref.void_type)) << 32;
switch ((@as(u64, @enumToInt(ty_inst)) << 32) | @as(u64, @enumToInt(result))) {
as_ty | @enumToInt(zir.Inst.Ref.u8_type),
as_ty | @enumToInt(zir.Inst.Ref.i8_type),
as_ty | @enumToInt(zir.Inst.Ref.u16_type),
as_ty | @enumToInt(zir.Inst.Ref.i16_type),
as_ty | @enumToInt(zir.Inst.Ref.u32_type),
as_ty | @enumToInt(zir.Inst.Ref.i32_type),
as_ty | @enumToInt(zir.Inst.Ref.u64_type),
as_ty | @enumToInt(zir.Inst.Ref.i64_type),
as_ty | @enumToInt(zir.Inst.Ref.usize_type),
as_ty | @enumToInt(zir.Inst.Ref.isize_type),
as_ty | @enumToInt(zir.Inst.Ref.c_short_type),
as_ty | @enumToInt(zir.Inst.Ref.c_ushort_type),
as_ty | @enumToInt(zir.Inst.Ref.c_int_type),
as_ty | @enumToInt(zir.Inst.Ref.c_uint_type),
as_ty | @enumToInt(zir.Inst.Ref.c_long_type),
as_ty | @enumToInt(zir.Inst.Ref.c_ulong_type),
as_ty | @enumToInt(zir.Inst.Ref.c_longlong_type),
as_ty | @enumToInt(zir.Inst.Ref.c_ulonglong_type),
as_ty | @enumToInt(zir.Inst.Ref.c_longdouble_type),
as_ty | @enumToInt(zir.Inst.Ref.f16_type),
as_ty | @enumToInt(zir.Inst.Ref.f32_type),
as_ty | @enumToInt(zir.Inst.Ref.f64_type),
as_ty | @enumToInt(zir.Inst.Ref.f128_type),
as_ty | @enumToInt(zir.Inst.Ref.c_void_type),
as_ty | @enumToInt(zir.Inst.Ref.bool_type),
as_ty | @enumToInt(zir.Inst.Ref.void_type),
as_ty | @enumToInt(zir.Inst.Ref.type_type),
as_ty | @enumToInt(zir.Inst.Ref.anyerror_type),
as_ty | @enumToInt(zir.Inst.Ref.comptime_int_type),
as_ty | @enumToInt(zir.Inst.Ref.comptime_float_type),
as_ty | @enumToInt(zir.Inst.Ref.noreturn_type),
as_ty | @enumToInt(zir.Inst.Ref.null_type),
as_ty | @enumToInt(zir.Inst.Ref.undefined_type),
as_ty | @enumToInt(zir.Inst.Ref.fn_noreturn_no_args_type),
as_ty | @enumToInt(zir.Inst.Ref.fn_void_no_args_type),
as_ty | @enumToInt(zir.Inst.Ref.fn_naked_noreturn_no_args_type),
as_ty | @enumToInt(zir.Inst.Ref.fn_ccc_void_no_args_type),
as_ty | @enumToInt(zir.Inst.Ref.single_const_pointer_to_comptime_int_type),
as_ty | @enumToInt(zir.Inst.Ref.const_slice_u8_type),
as_ty | @enumToInt(zir.Inst.Ref.enum_literal_type),
as_comptime_int | @enumToInt(zir.Inst.Ref.zero),
as_comptime_int | @enumToInt(zir.Inst.Ref.one),
as_bool | @enumToInt(zir.Inst.Ref.bool_true),
as_bool | @enumToInt(zir.Inst.Ref.bool_false),
as_usize | @enumToInt(zir.Inst.Ref.zero_usize),
as_usize | @enumToInt(zir.Inst.Ref.one_usize),
as_void | @enumToInt(zir.Inst.Ref.void_value),
=> return result, // type of result is already correct
// Need an explicit type coercion instruction.
else => return gz.addPlNode(.as_node, src_node, zir.Inst.As{
.dest_type = ty_inst,
.operand = result,
}),
}
},
.ptr => |ptr_inst| {
_ = try gz.addPlNode(.store_node, src_node, zir.Inst.Bin{
.lhs = ptr_inst,
.rhs = result,
});
return result;
},
.inferred_ptr => |alloc| {
_ = try gz.addBin(.store_to_inferred_ptr, alloc, result);
return result;
},
.block_ptr => |block_scope| {
block_scope.rvalue_rl_count += 1;
_ = try gz.addBin(.store_to_block_ptr, block_scope.rl_ptr, result);
return result;
},
}
} | src/AstGen.zig |
const Archive = @This();
const std = @import("std");
const fmt = std.fmt;
const fs = std.fs;
const mem = std.mem;
const log = std.log.scoped(.archive);
const Allocator = std.mem.Allocator;
file: fs.File,
name: []const u8,
archive_type: ArchiveType,
files: std.ArrayListUnmanaged(ArchivedFile),
// Used so we can easily look up file indices when inserting!
// TODO: A trie is probably a lot better here
filename_to_index: std.StringArrayHashMapUnmanaged(u64),
pub const ArchiveType = enum {
ambiguous,
gnu,
gnu64,
bsd,
darwin64, // darwin_32 *is* bsd
coff, // (windows)
};
pub const Operation = enum {
insert,
delete,
move,
print,
quick_append,
ranlib,
display_contents,
extract,
};
// All archive files start with this magic string
pub const magic_string = "!<arch>\n";
// GNU constants
pub const gnu_first_line_buffer_length = 60;
pub const gnu_string_table_seek_pos = magic_string.len + gnu_first_line_buffer_length;
// BSD constants
pub const bsd_name_length_signifier = "#1/";
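// For illustration: a BSD-style member name such as "#1/12" means the real
// 12-byte name immediately precedes the member's data, and the header's size
// field covers those name bytes too (the parser below subtracts the name
// length from the amount left to seek past).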
// The format (unparsed) of the archive per-file header
// NOTE: The reality is more complex than this as different mechanisms
// have been devised for storing the names of files which exceed 16 bytes!
pub const Header = extern struct {
ar_name: [16]u8,
ar_date: [12]u8,
ar_uid: [6]u8,
ar_gid: [6]u8,
ar_mode: [8]u8,
ar_size: [10]u8,
ar_fmag: [2]u8,
};
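// For illustration, the header is 60 bytes of fixed-width ASCII fields:
// 16 (name) + 12 (date) + 6 (uid) + 6 (gid) + 8 (mode) + 10 (size) +
// 2 (fmag, always "`\n"); `finalize` below prints exactly this shape.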
pub const FileSource = enum {
archive,
file,
};
// TODO: the file-based representation has a problem: when doing a delete
// operation, it is possible that the file contents get replaced before they
// get written out. This is highly unpredictable, and we cannot even guess
// which file contents get overwritten in which order.
pub const Contents = struct {
file: fs.File,
seek_pos: u64,
length: u64,
file_source: FileSource,
// mode: u64,
// TODO: deallocation
pub fn write(self: *const Contents, out_stream: anytype, stderr: anytype) !void {
try self.file.seekTo(self.seek_pos);
var reader = self.file.reader();
// TODO: select a decent default buffer size (and have a way of controlling it?)
// probably should be allocated on the heap as well.
var buffer: [1000]u8 = undefined;
var total_bytes_read: u64 = 0;
while (true) {
const bytes_read = try reader.read(buffer[0..std.math.min(buffer.len, self.length - total_bytes_read)]);
if (bytes_read == 0) {
break;
}
total_bytes_read = total_bytes_read + bytes_read;
_ = try out_stream.write(buffer[0..bytes_read]);
if (total_bytes_read >= self.length) {
break;
}
}
_ = stderr;
}
};
// An internal representation of files being archived
pub const ArchivedFile = struct {
name: []const u8,
contents: Contents,
};
pub fn create(
file: fs.File,
name: []const u8,
) Archive {
return Archive{
.file = file,
.name = name,
.archive_type = .ambiguous,
.files = .{},
.filename_to_index = .{},
};
}
// BEGIN_MERGE from https://github.com/iddev5/zar
// TODO: This needs to be integrated into the workflow
// used for parsing. (use same error handling workflow etc.)
/// Use same naming scheme for objects (as found elsewhere in the file).
pub fn finalize(self: *Archive, allocator: *Allocator) !void {
// TODO: Currently this is a bit of a mine-field - so maybe just reading all the file-contents
// into memory is the best bet for now?
// Overwrite all contents
try self.file.seekTo(0);
const writer = self.file.writer();
try writer.writeAll(magic_string);
if (self.archive_type == .ambiguous) {
// TODO: Set this based on the current platform you are using the tool
// on!
self.archive_type = .gnu;
}
const header_names = try allocator.alloc([16]u8, self.files.items.len);
// GNU format: Create string table
if (self.archive_type == .gnu) {
var string_table = std.ArrayList(u8).init(allocator);
defer string_table.deinit();
// Generate the complete string table
for (self.files.items) |file, index| {
const is_the_name_allowed = (file.name.len < 16);
// If the name is short enough to fit in the header, then just write it there
// Otherwise, add it to the string table and store a reference to its location
const name = if (is_the_name_allowed) try mem.concat(allocator, u8, &.{ file.name, "/" }) else try std.fmt.allocPrint(allocator, "/{}", .{blk: {
// Get the position of the file in string table
const pos = string_table.items.len;
// Now add the file name to string table
try string_table.appendSlice(file.name);
try string_table.appendSlice("/\n");
break :blk pos;
}});
defer allocator.free(name);
// Edit the header
_ = try std.fmt.bufPrint(&(header_names[index]), "{s: <16}", .{name});
}
// Write the string table itself
{
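// The "//" member reuses the 60-byte header shape: "//" plus 46 spaces
// fills the 48 bytes before the size field, then a 10-character size and
// the "`\n" terminator, followed by the raw table bytes.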
if (string_table.items.len != 0)
try writer.print("//{s}{: <10}`\n{s}", .{ " " ** 46, string_table.items.len, string_table.items });
}
} else if (self.archive_type == .bsd) {
// BSD format: Just write the length of the name in header
for (self.files.items) |file, index| {
_ = try std.fmt.bufPrint(&(header_names[index]), "#1/{: <13}", .{file.name.len});
}
}
// Write the files
for (self.files.items) |file, index| {
// Write the header
// The name field was already resolved into header_names above
var headerBuffer: [@sizeOf(Header)]u8 = undefined;
_ = try std.fmt.bufPrint(
&headerBuffer,
"{s: <16}{: <12}{: <6}{: <6}{o: <8}{: <10}`\n",
.{ &header_names[index], 0, 0, 0, 0, file.contents.length },
);
// TODO: handle errors
_ = try writer.write(&headerBuffer);
// Write the name of the file in the data section
if (self.archive_type == .bsd) {
try writer.writeAll(file.name);
}
try file.contents.write(writer, null);
}
// Truncate the file size
try self.file.setEndPos(try self.file.getPos());
}
pub fn deleteFiles(self: *Archive, file_names: ?[][]u8) !void {
// For the list of given file names, find the entry in self.files
// and remove it from self.files.
if (file_names) |names| {
for (names) |file_name| {
for (self.files.items) |file, index| {
if (std.mem.eql(u8, file.name, file_name)) {
_ = self.files.orderedRemove(index);
break;
}
}
}
}
}
// Convenience function for doing mass operations
const OperationErrorSet = Allocator.Error || std.fmt.ParseIntError ||
fs.File.OpenError || fs.File.ReadError || fs.File.WriteError || fs.File.SeekError;
fn massOperation(self: *Archive, file_names: ?[][]u8, data: anytype, cb: fn (item: ArchivedFile, index: usize, data: anytype) OperationErrorSet!void) !void {
if (file_names) |names| {
for (self.files.items) |file, index| {
for (names) |name| {
if (std.mem.eql(u8, file.name, name)) {
try cb(file, index, data);
break;
}
}
}
} else {
for (self.files.items) |item, index| {
try cb(item, index, data);
}
}
}
fn printOperation(item: ArchivedFile, index: usize, data: anytype) !void {
_ = index;
const writer = data;
// Contents is a file-backed span, not a byte slice, so stream it out.
try item.contents.write(writer, null);
}
pub fn print(self: *Archive, file_names: ?[][]u8, writer: std.fs.File.Writer) !void {
try self.massOperation(file_names, writer, printOperation);
}
fn extractOperation(item: ArchivedFile, index: usize, data: anytype) !void {
_ = index;
_ = data;
const file = try std.fs.cwd().createFile(item.name, .{});
defer file.close();
// Contents is a file-backed span, not a byte slice, so stream it out.
try item.contents.write(file.writer(), null);
}
pub fn extract(self: *Archive, file_names: ?[][]u8) !void {
try self.massOperation(file_names, null, extractOperation);
}
// END_MERGE from https://github.com/iddev5/zar
pub fn insertFiles(self: *Archive, allocator: *Allocator, file_names: ?[][]u8) !void {
if (file_names) |names| {
for (names) |file_name| {
// Open the file and read all of its contents
const file = try std.fs.cwd().openFile(file_name, .{ .read = true });
const file_stats = try file.stat();
const archived_file = ArchivedFile{
.name = file_name, // TODO: sort out the file-name with respect to path
.contents = Contents{
.file_source = .file,
.file = file,
.seek_pos = 0,
.length = file_stats.size,
// .mode = file_stats.mode,
},
};
// A trie-based datastructure would be better for this!
const getOrPutResult = try self.filename_to_index.getOrPut(allocator, archived_file.name);
if (getOrPutResult.found_existing) {
const existing_index = getOrPutResult.value_ptr.*;
self.files.items[existing_index] = archived_file;
} else {
getOrPutResult.value_ptr.* = self.files.items.len;
try self.files.append(allocator, archived_file);
}
}
}
}
pub fn parse(self: *Archive, allocator: *Allocator, stderr: anytype) !void {
const reader = self.file.reader();
{
// Is the magic header found at the start of the archive?
var magic: [magic_string.len]u8 = undefined;
const bytes_read = try reader.read(&magic);
if (bytes_read == 0) {
// Archive is empty and that is ok!
return;
}
if (bytes_read < magic_string.len) {
try stderr.print("File too short to be an archive\n", .{});
return error.NotArchive;
}
if (!mem.eql(u8, &magic, magic_string)) {
try stderr.print("Invalid magic string: expected '{s}', found '{s}'\n", .{ magic_string, magic });
return error.NotArchive;
}
}
// https://www.freebsd.org/cgi/man.cgi?query=ar&sektion=5
// Process string/symbol tables and/or try to infer archive type!
var string_table_contents: []u8 = undefined;
{
var starting_seek_pos = magic_string.len;
var first_line_buffer: [gnu_first_line_buffer_length]u8 = undefined;
const has_line_to_process = result: {
const chars_read = try reader.read(&first_line_buffer);
if (chars_read < first_line_buffer.len) {
break :result false;
}
break :result true;
};
if (has_line_to_process) {
if (mem.eql(u8, first_line_buffer[0..2], "//")) {
switch (self.archive_type) {
.ambiguous => self.archive_type = .gnu,
.gnu, .gnu64 => {},
else => {
try stderr.print("Came across gnu-style string table in {} archive\n", .{self.archive_type});
return error.NotArchive;
},
}
const table_size_string = first_line_buffer[48..58];
const table_size = try fmt.parseInt(u32, mem.trim(u8, table_size_string, " "), 10);
string_table_contents = try allocator.alloc(u8, table_size);
// TODO: actually handle the error of an unexpected number of bytes being read!
_ = try reader.read(string_table_contents);
starting_seek_pos = starting_seek_pos + first_line_buffer.len + table_size;
}
}
try reader.context.seekTo(starting_seek_pos);
}
while (true) {
const archive_header = reader.readStruct(Header) catch |err| switch (err) {
error.EndOfStream => break,
else => |e| return e,
};
// the lifetime of the archive headers will match that of the parsed files (for now)
// so we can take a reference to the strings stored there directly!
var trimmed_archive_name = mem.trim(u8, &archive_header.ar_name, " ");
// Check against gnu naming properties
const ends_with_gnu_slash = (trimmed_archive_name[trimmed_archive_name.len - 1] == '/');
var gnu_offset_value: u32 = 0;
const starts_with_gnu_offset = trimmed_archive_name[0] == '/';
if (starts_with_gnu_offset) {
gnu_offset_value = try fmt.parseInt(u32, trimmed_archive_name[1..trimmed_archive_name.len], 10);
}
const must_be_gnu = ends_with_gnu_slash or starts_with_gnu_offset;
// Check against bsd naming properties
const starts_with_bsd_name_length = (trimmed_archive_name.len >= 2) and mem.eql(u8, trimmed_archive_name[0..2], bsd_name_length_signifier[0..2]);
const could_be_bsd = starts_with_bsd_name_length;
// TODO: Have a proper mechanism for erroring on the wrong types of archive.
switch (self.archive_type) {
.ambiguous => {
if (must_be_gnu) {
self.archive_type = .gnu;
} else if (could_be_bsd) {
self.archive_type = .bsd;
} else {
return error.TODO;
}
},
.gnu, .gnu64 => {
if (!must_be_gnu) {
try stderr.print("Error parsing archive header name - format of {s} wasn't gnu compatible\n", .{trimmed_archive_name});
return error.BadArchive;
}
},
.bsd, .darwin64 => {
if (must_be_gnu) {
try stderr.print("Error parsing archive header name - format of {s} wasn't bsd compatible\n", .{trimmed_archive_name});
return error.BadArchive;
}
},
else => {
if (must_be_gnu) {
return error.TODO;
}
return error.TODO;
},
}
if (ends_with_gnu_slash) {
// slice-off the slash
trimmed_archive_name = trimmed_archive_name[0 .. trimmed_archive_name.len - 1];
}
if (starts_with_gnu_offset) {
const name_offset_in_string_table = try fmt.parseInt(u32, mem.trim(u8, trimmed_archive_name[1..trimmed_archive_name.len], " "), 10);
// Navigate to the start of the string in the string table
const string_start = string_table_contents[name_offset_in_string_table..string_table_contents.len];
// Find the end of the string (which is always a newline)
const end_string_index = mem.indexOf(u8, string_start, "\n");
if (end_string_index == null) {
try stderr.print("Error parsing name in string table, couldn't find terminating character\n", .{});
return error.NotArchive;
}
const string_full = string_start[0..end_string_index.?];
// String must have a forward slash before the newline, so check that
// is there and remove it as well!
if (string_full[string_full.len - 1] != '/') {
try stderr.print("Error parsing name in string table, didn't find '/' before terminating newline\n", .{});
return error.NotArchive;
}
// Referencing the slice directly is fine as the same bump allocator is
// used for the rest of the data structure!
trimmed_archive_name = string_full[0 .. string_full.len - 1];
}
var seek_forward_amount = try fmt.parseInt(u32, mem.trim(u8, &archive_header.ar_size, " "), 10);
// Make sure that these allocations get properly disposed of later!
if (starts_with_bsd_name_length) {
trimmed_archive_name = trimmed_archive_name[bsd_name_length_signifier.len..trimmed_archive_name.len];
const archive_name_length = fmt.parseInt(u32, trimmed_archive_name, 10) catch {
try stderr.print("Error parsing bsd-style string length\n", .{});
return error.NotArchive;
};
const archive_name_buffer = try allocator.alloc(u8, archive_name_length);
// TODO: proper error handling and length checking here!
_ = try reader.read(archive_name_buffer);
seek_forward_amount = seek_forward_amount - archive_name_length;
trimmed_archive_name = archive_name_buffer;
} else {
const archive_name_buffer = try allocator.alloc(u8, trimmed_archive_name.len);
mem.copy(u8, archive_name_buffer, trimmed_archive_name);
trimmed_archive_name = archive_name_buffer;
}
const parsed_file = ArchivedFile{
.name = trimmed_archive_name,
.contents = Contents{
.file = reader.context,
.seek_pos = try reader.context.getPos(),
.length = seek_forward_amount,
.file_source = .archive,
},
};
try self.files.append(allocator, parsed_file);
try reader.context.seekBy(seek_forward_amount);
}
} | src/archive/Archive.zig |
//! A thread-safe resource which supports blocking until signaled.
//! This API is for kernel threads, not evented I/O.
//! This API is statically initializable. It cannot fail to be initialized
//! and it requires no deinitialization. The downside is that it may not
//! integrate as cleanly into other synchronization APIs, or, in the worst case,
//! may be forced to fall back on spin locking. As a rule of thumb, prefer
//! to use `std.ResetEvent` when possible, and use `StaticResetEvent` when
//! the logic needs stronger API guarantees.
const std = @import("std.zig");
const StaticResetEvent = @This();
const SpinLock = std.SpinLock;
const assert = std.debug.assert;
const os = std.os;
const time = std.time;
const linux = std.os.linux;
const windows = std.os.windows;
const testing = std.testing;
impl: Impl = .{},
pub const Impl = if (std.builtin.single_threaded)
DebugEvent
else
AtomicEvent;
/// Sets the event if not already set and wakes up all the threads waiting on
/// the event. It is safe to call `set` multiple times before calling `wait`.
/// However it is illegal to call `set` after `wait` is called until the event
/// is `reset`. This function is thread-safe.
pub fn set(ev: *StaticResetEvent) void {
return ev.impl.set();
}
/// Wait for the event to be set by blocking the current thread.
/// Thread-safe. No spurious wakeups.
/// Upon return from `wait`, the only function available to be called
/// in `StaticResetEvent` is `reset`.
pub fn wait(ev: *StaticResetEvent) void {
return ev.impl.wait();
}
/// Resets the event to its original, unset state.
/// This function is *not* thread-safe. It is equivalent to calling
/// `deinit` followed by `init` but without the possibility of failure.
pub fn reset(ev: *StaticResetEvent) void {
return ev.impl.reset();
}
pub const TimedWaitResult = std.ResetEvent.TimedWaitResult;
/// Wait for the event to be set by blocking the current thread.
/// A timeout in nanoseconds can be provided as a hint for how
/// long the thread should block on the unset event before returning
/// `TimedWaitResult.timed_out`.
/// Thread-safe. No precision of timing is guaranteed.
/// Upon return from `timedWait`, the only function available to be called
/// in `StaticResetEvent` is `reset`.
pub fn timedWait(ev: *StaticResetEvent, timeout_ns: u64) TimedWaitResult {
return ev.impl.timedWait(timeout_ns);
}
/// For single-threaded builds, we use this to detect deadlocks.
/// In unsafe modes this ends up being a no-op.
pub const DebugEvent = struct {
state: State = State.unset,
const State = enum {
unset,
set,
waited,
};
/// This function is provided so that this type can be re-used inside
/// `std.ResetEvent`.
pub fn init(ev: *DebugEvent) void {
ev.* = .{};
}
/// This function is provided so that this type can be re-used inside
/// `std.ResetEvent`.
pub fn deinit(ev: *DebugEvent) void {
ev.* = undefined;
}
pub fn set(ev: *DebugEvent) void {
switch (ev.state) {
.unset => ev.state = .set,
.set => {},
.waited => unreachable, // Not allowed to call `set` until `reset`.
}
}
pub fn wait(ev: *DebugEvent) void {
switch (ev.state) {
.unset => unreachable, // Deadlock detected.
.set => return,
.waited => unreachable, // Not allowed to call `wait` until `reset`.
}
}
pub fn timedWait(ev: *DebugEvent, timeout: u64) TimedWaitResult {
switch (ev.state) {
.unset => return .timed_out,
.set => return .event_set,
.waited => unreachable, // Not allowed to call `wait` until `reset`.
}
}
pub fn reset(ev: *DebugEvent) void {
ev.state = .unset;
}
};
pub const AtomicEvent = struct {
waiters: u32 = 0,
const WAKE = 1 << 0;
const WAIT = 1 << 1;
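// `waiters` packs the whole state into one u32: bit 0 (WAKE) marks the
// event as set, and each blocked thread adds WAIT (bit 1), so on `set` the
// value shifted right by one is the number of threads to wake.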
/// This function is provided so that this type can be re-used inside
/// `std.ResetEvent`.
pub fn init(ev: *AtomicEvent) void {
ev.* = .{};
}
/// This function is provided so that this type can be re-used inside
/// `std.ResetEvent`.
pub fn deinit(ev: *AtomicEvent) void {
ev.* = undefined;
}
pub fn set(ev: *AtomicEvent) void {
const waiters = @atomicRmw(u32, &ev.waiters, .Xchg, WAKE, .Release);
if (waiters >= WAIT) {
return Futex.wake(&ev.waiters, waiters >> 1);
}
}
pub fn wait(ev: *AtomicEvent) void {
switch (ev.timedWait(null)) {
.timed_out => unreachable,
.event_set => return,
}
}
pub fn timedWait(ev: *AtomicEvent, timeout: ?u64) TimedWaitResult {
var waiters = @atomicLoad(u32, &ev.waiters, .Acquire);
while (waiters != WAKE) {
waiters = @cmpxchgWeak(u32, &ev.waiters, waiters, waiters + WAIT, .Acquire, .Acquire) orelse {
if (Futex.wait(&ev.waiters, timeout)) |_| {
return .event_set;
} else |_| {
return .timed_out;
}
};
}
return .event_set;
}
pub fn reset(ev: *AtomicEvent) void {
@atomicStore(u32, &ev.waiters, 0, .Monotonic);
}
pub const Futex = switch (std.Target.current.os.tag) {
.windows => WindowsFutex,
.linux => LinuxFutex,
else => SpinFutex,
};
pub const SpinFutex = struct {
fn wake(waiters: *u32, wake_count: u32) void {}
fn wait(waiters: *u32, timeout: ?u64) !void {
var timer: time.Timer = undefined;
if (timeout != null)
timer = time.Timer.start() catch return error.TimedOut;
while (@atomicLoad(u32, waiters, .Acquire) != WAKE) {
SpinLock.yield();
if (timeout) |timeout_ns| {
if (timer.read() >= timeout_ns)
return error.TimedOut;
}
}
}
};
pub const LinuxFutex = struct {
fn wake(waiters: *u32, wake_count: u32) void {
const waiting = std.math.maxInt(i32); // wake_count
const ptr = @ptrCast(*const i32, waiters);
const rc = linux.futex_wake(ptr, linux.FUTEX_WAKE | linux.FUTEX_PRIVATE_FLAG, waiting);
assert(linux.getErrno(rc) == 0);
}
fn wait(waiters: *u32, timeout: ?u64) !void {
var ts: linux.timespec = undefined;
var ts_ptr: ?*linux.timespec = null;
if (timeout) |timeout_ns| {
ts_ptr = &ts;
ts.tv_sec = @intCast(isize, timeout_ns / time.ns_per_s);
ts.tv_nsec = @intCast(isize, timeout_ns % time.ns_per_s);
}
while (true) {
const waiting = @atomicLoad(u32, waiters, .Acquire);
if (waiting == WAKE)
return;
const expected = @intCast(i32, waiting);
const ptr = @ptrCast(*const i32, waiters);
const rc = linux.futex_wait(ptr, linux.FUTEX_WAIT | linux.FUTEX_PRIVATE_FLAG, expected, ts_ptr);
switch (linux.getErrno(rc)) {
0 => continue,
os.ETIMEDOUT => return error.TimedOut,
os.EINTR => continue,
os.EAGAIN => return,
else => unreachable,
}
}
}
};
pub const WindowsFutex = struct {
pub fn wake(waiters: *u32, wake_count: u32) void {
const handle = getEventHandle() orelse return SpinFutex.wake(waiters, wake_count);
const key = @ptrCast(*const c_void, waiters);
var waiting = wake_count;
while (waiting != 0) : (waiting -= 1) {
const rc = windows.ntdll.NtReleaseKeyedEvent(handle, key, windows.FALSE, null);
assert(rc == .SUCCESS);
}
}
pub fn wait(waiters: *u32, timeout: ?u64) !void {
const handle = getEventHandle() orelse return SpinFutex.wait(waiters, timeout);
const key = @ptrCast(*const c_void, waiters);
// NT uses timeouts in units of 100ns with negative value being relative
var timeout_ptr: ?*windows.LARGE_INTEGER = null;
var timeout_value: windows.LARGE_INTEGER = undefined;
if (timeout) |timeout_ns| {
timeout_ptr = &timeout_value;
timeout_value = -@intCast(windows.LARGE_INTEGER, timeout_ns / 100);
}
// NtWaitForKeyedEvent doesn't have spurious wake-ups
var rc = windows.ntdll.NtWaitForKeyedEvent(handle, key, windows.FALSE, timeout_ptr);
switch (rc) {
.TIMEOUT => {
// update the wait count to signal that we're not waiting anymore.
// if the .set() thread already observed that we are, perform a
// matching NtWaitForKeyedEvent so that the .set() thread doesn't
// deadlock trying to run NtReleaseKeyedEvent above.
var waiting = @atomicLoad(u32, waiters, .Monotonic);
while (true) {
if (waiting == WAKE) {
rc = windows.ntdll.NtWaitForKeyedEvent(handle, key, windows.FALSE, null);
assert(rc == .WAIT_0);
break;
} else {
waiting = @cmpxchgWeak(u32, waiters, waiting, waiting - WAIT, .Acquire, .Monotonic) orelse break;
continue;
}
}
return error.TimedOut;
},
.WAIT_0 => {},
else => unreachable,
}
}
var event_handle: usize = EMPTY;
const EMPTY = ~@as(usize, 0);
const LOADING = EMPTY - 1;
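// event_handle is a tri-state: EMPTY (never created), LOADING (another
// thread is creating it), or an NT keyed-event handle (0 if creation
// failed, in which case callers fall back to SpinFutex).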
pub fn getEventHandle() ?windows.HANDLE {
var handle = @atomicLoad(usize, &event_handle, .Monotonic);
while (true) {
switch (handle) {
EMPTY => handle = @cmpxchgWeak(usize, &event_handle, EMPTY, LOADING, .Acquire, .Monotonic) orelse {
const handle_ptr = @ptrCast(*windows.HANDLE, &handle);
const access_mask = windows.GENERIC_READ | windows.GENERIC_WRITE;
if (windows.ntdll.NtCreateKeyedEvent(handle_ptr, access_mask, null, 0) != .SUCCESS)
handle = 0;
@atomicStore(usize, &event_handle, handle, .Monotonic);
return @intToPtr(?windows.HANDLE, handle);
},
LOADING => {
SpinLock.yield();
handle = @atomicLoad(usize, &event_handle, .Monotonic);
},
else => {
return @intToPtr(?windows.HANDLE, handle);
},
}
}
}
};
};
test "basic usage" {
var event = StaticResetEvent{};
// test event setting
event.set();
// test event resetting
event.reset();
// test event waiting (non-blocking)
event.set();
event.wait();
event.reset();
event.set();
testing.expectEqual(TimedWaitResult.event_set, event.timedWait(1));
// test cross-thread signaling
if (std.builtin.single_threaded)
return;
const Context = struct {
const Self = @This();
value: u128 = 0,
in: StaticResetEvent = .{},
out: StaticResetEvent = .{},
fn sender(self: *Self) void {
// update value and signal input
testing.expect(self.value == 0);
self.value = 1;
self.in.set();
// wait for receiver to update value and signal output
self.out.wait();
testing.expect(self.value == 2);
// update value and signal final input
self.value = 3;
self.in.set();
}
fn receiver(self: *Self) void {
// wait for sender to update value and signal input
self.in.wait();
assert(self.value == 1);
// update value and signal output
self.in.reset();
self.value = 2;
self.out.set();
// wait for sender to update value and signal final input
self.in.wait();
assert(self.value == 3);
}
fn sleeper(self: *Self) void {
self.in.set();
time.sleep(time.ns_per_ms * 2);
self.value = 5;
self.out.set();
}
fn timedWaiter(self: *Self) !void {
self.in.wait();
testing.expectEqual(TimedWaitResult.timed_out, self.out.timedWait(time.ns_per_us));
try self.out.timedWait(time.ns_per_ms * 100);
testing.expect(self.value == 5);
}
};
var context = Context{};
const receiver = try std.Thread.spawn(&context, Context.receiver);
defer receiver.wait();
context.sender();
if (false) {
// I have now observed this fail on macOS, Windows, and Linux.
// https://github.com/ziglang/zig/issues/7009
var timed = Context.init();
defer timed.deinit();
const sleeper = try std.Thread.spawn(&timed, Context.sleeper);
defer sleeper.wait();
try timed.timedWaiter();
}
} | lib/std/StaticResetEvent.zig |
const ser = @import("../../../lib.zig").ser;
/// Sequence serialization interface.
///
/// Getty sequences are only partially serialized by `getty.Serializer`
/// implementations due to the fact that there are many different ways to
/// iterate over and access the elements of a sequence. As such, this interface
/// is provided so that serialization may be driven and completed by the user
/// of a serializer.
///
/// The interface specifies the following:
///
/// - How to serialize an element of a sequence.
/// - How to finish serialization for a sequence.
///
/// Parameters
/// ==========
///
/// Context
/// -------
///
/// This is the type that implements `getty.ser.Seq` (or a pointer to it).
///
/// Ok
/// --
///
/// The successful return type for all of `getty.ser.Seq`'s methods.
///
/// Error
/// -----
///
/// The error set used by all of `getty.ser.Seq`'s methods upon failure.
///
/// serializeElement
/// ----------------
///
/// A method that serializes an element of a sequence.
///
/// end
/// ---
///
/// A method that ends the serialization of a sequence.
///
/// Examples
/// ========
///
/// ```zig
/// const seq_sb = struct {
/// pub fn is(comptime T: type) bool {
/// return T == [3]i32;
/// }
///
/// pub fn serialize(value: anytype, serializer: anytype) !@TypeOf(serializer).Ok {
/// // Begin sequence serialization.
/// const seq = (try serializer.serializeSeq(3)).seq();
///
/// // Serialize sequence elements.
/// for (value) |elem| {
/// try seq.serializeElement(elem);
/// }
///
/// // End sequence serialization.
/// return try seq.end();
/// }
/// };
/// ```
pub fn Seq(
comptime Context: type,
comptime O: type,
comptime E: type,
comptime elementFn: fn (Context, anytype) E!void,
comptime endFn: fn (Context) E!O,
) type {
// Parameter names differ from the public `Ok`/`Error` decls and the
// methods below so they do not shadow one another.
return struct {
pub const @"getty.ser.Seq" = struct {
context: Context,
const Self = @This();
/// Successful return type.
pub const Ok = O;
/// The error set used upon failure.
pub const Error = E;
/// Serialize a sequence element.
pub fn serializeElement(self: Self, value: anytype) Error!void {
try elementFn(self.context, value);
}
/// Finish serializing a sequence.
pub fn end(self: Self) Error!Ok {
return try endFn(self.context);
}
};
pub fn seq(self: Context) @"getty.ser.Seq" {
return .{ .context = self };
}
};
} | src/ser/interface/seq.zig |
const std = @import("std");
const Pkg = std.build.Pkg;
const FileSource = std.build.FileSource;
pub const pkgs = struct {
pub const zbox = Pkg{
.name = "zbox",
.path = FileSource{
.path = "forks/zbox/src/box.zig",
},
.dependencies = &[_]Pkg{
Pkg{
.name = "ziglyph",
.path = FileSource{
.path = ".gyro/ziglyph-jecolon-github.com-c37d93b6/pkg/src/ziglyph.zig",
},
},
},
};
pub const datetime = Pkg{
.name = "datetime",
.path = FileSource{
.path = ".gyro/zig-datetime-frmdstryr-github.com-4782701c/pkg/src/datetime.zig",
},
};
pub const clap = Pkg{
.name = "clap",
.path = FileSource{
.path = ".gyro/zig-clap-Hejsil-github.com-cf8a34d1/pkg/clap.zig",
},
};
pub const iguanaTLS = Pkg{
.name = "iguanaTLS",
.path = FileSource{
.path = ".gyro/iguanaTLS-nektro-github.com-a48976be/pkg/src/main.zig",
},
};
pub const hzzp = Pkg{
.name = "hzzp",
.path = FileSource{
.path = ".gyro/hzzp-truemedian-github.com-91ab8e74/pkg/src/main.zig",
},
};
pub const tzif = Pkg{
.name = "tzif",
.path = FileSource{
.path = ".gyro/zig-tzif-leroycep-github.com-cbb1d9f6/pkg/tzif.zig",
},
};
pub const ziglyph = Pkg{
.name = "ziglyph",
.path = FileSource{
.path = ".gyro/ziglyph-jecolon-github.com-c37d93b6/pkg/src/ziglyph.zig",
},
};
pub const @"known-folders" = Pkg{
.name = "known-folders",
.path = FileSource{
.path = ".gyro/known-folders-ziglibs-github.com-9db1b992/pkg/known-folders.zig",
},
};
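// A hedged usage sketch for build.zig: call `pkgs.addAllTo(exe)` on a
// LibExeObjStep so that the root source file can `@import` each package
// above by name.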
pub fn addAllTo(artifact: *std.build.LibExeObjStep) void {
artifact.addPackage(pkgs.zbox);
artifact.addPackage(pkgs.datetime);
artifact.addPackage(pkgs.clap);
artifact.addPackage(pkgs.iguanaTLS);
artifact.addPackage(pkgs.hzzp);
artifact.addPackage(pkgs.tzif);
artifact.addPackage(pkgs.ziglyph);
artifact.addPackage(pkgs.@"known-folders");
}
};
pub const exports = struct {
pub const bork = Pkg{
.name = "bork",
.path = "src/main.zig",
.dependencies = &[_]Pkg{
pkgs.zbox,
pkgs.datetime,
pkgs.clap,
pkgs.iguanaTLS,
pkgs.hzzp,
pkgs.tzif,
pkgs.ziglyph,
pkgs.@"known-folders",
},
};
}; | deps.zig |
const std = @import("std");
const builtin = @import("builtin");
const fs = std.fs;
const os = std.os;
const io = std.io;
const mem = std.mem;
const fmt = std.fmt;
const system =
if (builtin.link_libc)
@cImport({
@cInclude("termios.h");
@cInclude("sys/ioctl.h");
})
else
struct {
usingnamespace os.system;
const TIOCGWINSZ = os.system.T.IOCGWINSZ;
};
const assert = std.debug.assert;
const ArrayList = std.ArrayList;
const Allocator = mem.Allocator;
usingnamespace @import("util.zig");
/// Input events
pub const Event = union(enum) {
tick,
escape,
up,
down,
left,
right,
other: []const u8,
};
pub const SGR = packed struct {
bold: bool = false,
underline: bool = false,
reverse: bool = false,
fg_black: bool = false,
bg_black: bool = false,
fg_red: bool = false,
bg_red: bool = false,
fg_green: bool = false,
bg_green: bool = false,
fg_yellow: bool = false,
bg_yellow: bool = false,
fg_blue: bool = false,
bg_blue: bool = false,
fg_magenta: bool = false,
bg_magenta: bool = false,
fg_cyan: bool = false,
bg_cyan: bool = false,
fg_white: bool = false,
bg_white: bool = false,
// not
pub fn invert(self: SGR) SGR {
var other = SGR{};
inline for (@typeInfo(SGR).Struct.fields) |field| {
@field(other, field.name) = !@field(self, field.name);
}
return other;
}
// and
pub fn intersect(self: SGR, other: SGR) SGR {
var new = SGR{};
inline for (@typeInfo(SGR).Struct.fields) |field| {
@field(new, field.name) =
@field(self, field.name) and @field(other, field.name);
}
return new;
}
// or
pub fn unify(self: SGR, other: SGR) SGR {
var new = SGR{};
inline for (@typeInfo(SGR).Struct.fields) |field| {
@field(new, field.name) =
@field(self, field.name) or @field(other, field.name);
}
return new;
}
pub fn eql(self: SGR, other: SGR) bool {
inline for (@typeInfo(SGR).Struct.fields) |field| {
if (!(@field(self, field.name) == @field(other, field.name)))
return false;
}
return true;
}
};
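// A minimal usage sketch (assuming an initialized terminal; `sendSGR` is
// defined below): the set operations compose styles before emission.
//
//     const emphasis = (SGR{ .bold = true }).unify(SGR{ .fg_red = true });
//     try sendSGR(emphasis); // emits csi ++ "0;1;31m"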
pub const InTty = fs.File.Reader;
pub const OutTty = fs.File.Writer;
pub const ErrorSet = struct {
pub const BufWrite = ArrayList(u8).Writer.Error;
pub const TtyWrite = OutTty.Error;
pub const TtyRead = InTty.Error;
pub const Write = ErrorSet.BufWrite || ErrorSet.TtyWrite;
pub const Read = ErrorSet.TtyRead;
pub const Termios = std.os.TermiosGetError || std.os.TermiosSetError;
pub const Setup = Allocator.Error || ErrorSet.Termios || ErrorSet.TtyWrite || fs.File.OpenError;
};
/// write raw text to the terminal output buffer
pub fn send(seq: []const u8) ErrorSet.BufWrite!void {
try state().buffer.out.writer().writeAll(seq);
}
pub fn sendSGR(sgr: SGR) ErrorSet.BufWrite!void {
try send(csi ++ "0"); // always clear
if (sgr.bold) try send(";1");
if (sgr.underline) try send(";4");
if (sgr.reverse) try send(";7");
if (sgr.fg_black) try send(";30");
if (sgr.bg_black) try send(";40");
if (sgr.fg_red) try send(";31");
if (sgr.bg_red) try send(";41");
if (sgr.fg_green) try send(";32");
if (sgr.bg_green) try send(";42");
if (sgr.fg_yellow) try send(";33");
if (sgr.bg_yellow) try send(";43");
if (sgr.fg_blue) try send(";34");
if (sgr.bg_blue) try send(";44");
if (sgr.fg_magenta) try send(";35");
if (sgr.bg_magenta) try send(";45");
if (sgr.fg_cyan) try send(";36");
if (sgr.bg_cyan) try send(";46");
if (sgr.fg_white) try send(";37");
if (sgr.bg_white) try send(";74");
try send("m");
}
/// flush the terminal output buffer to the terminal
pub fn flush() ErrorSet.TtyWrite!void {
const self = state();
try self.tty.out.writeAll(self.buffer.out.items);
self.buffer.out.items.len = 0;
}
/// clear the entire terminal
pub fn clear() ErrorSet.BufWrite!void {
try sequence("2J");
}
pub fn beginSync() ErrorSet.BufWrite!void {
try send("\x1BP=1s\x1B\\");
}
pub fn endSync() ErrorSet.BufWrite!void {
try send("\x1BP=2s\x1B\\");
}
/// provides the size of the screen as the bottom-right-most position that
/// you can move your cursor to.
const TermSize = struct { height: usize, width: usize };
pub fn size() os.UnexpectedError!TermSize {
var winsize = mem.zeroes(system.winsize);
const err = os.system.ioctl(state().tty.in.context.handle, system.TIOCGWINSZ, @ptrToInt(&winsize));
if (os.errno(err) == .SUCCESS)
return TermSize{ .height = winsize.ws_row, .width = winsize.ws_col };
return os.unexpectedErrno(os.errno(err));
}
/// Hides cursor if visible
pub fn cursorHide() ErrorSet.BufWrite!void {
try sequence("?25l");
}
/// Shows cursor if hidden.
pub fn cursorShow() ErrorSet.BufWrite!void {
try sequence("?25h");
}
/// warp the cursor to the specified `row` and `col` in the current scrolling
/// region.
pub fn cursorTo(row: usize, col: usize) ErrorSet.BufWrite!void {
try formatSequence("{};{}H", .{ row + 1, col + 1 });
}
/// set up terminal for graphical operation
pub fn setup(alloc: Allocator) ErrorSet.Setup!void {
errdefer termState = null;
termState = .{};
const self = state();
self.buffer.in = try ArrayList(u8).initCapacity(alloc, 4096);
errdefer self.buffer.in.deinit();
self.buffer.out = try ArrayList(u8).initCapacity(alloc, 4096);
errdefer self.buffer.out.deinit();
//TODO: check that we are actually dealing with a tty here
// and either downgrade or error
self.tty.in = (try fs.cwd().openFile("/dev/tty", .{ .mode = .read_only })).reader();
errdefer self.tty.in.context.close();
self.tty.out = (try fs.cwd().openFile("/dev/tty", .{ .mode = .write_only })).writer();
errdefer self.tty.out.context.close();
// store current terminal settings
// and setup the terminal for graphical IO
self.original_termios = try os.tcgetattr(self.tty.in.context.handle);
var termios = self.original_termios;
// termios flags for 'raw' mode.
termios.iflag &= ~@as(
os.system.tcflag_t,
system.IGNBRK | system.BRKINT | system.PARMRK | system.ISTRIP |
system.INLCR | system.IGNCR | system.ICRNL | system.IXON,
);
termios.lflag &= ~@as(
os.system.tcflag_t,
system.ICANON | system.ECHO | system.ECHONL | system.IEXTEN | system.ISIG,
);
termios.oflag &= ~@as(os.system.tcflag_t, system.OPOST);
termios.cflag &= ~@as(os.system.tcflag_t, system.CSIZE | system.PARENB);
termios.cflag |= system.CS8;
termios.cc[VMIN] = 0; // read can timeout before any data is actually written; async timer
termios.cc[VTIME] = 1; // 1/10th of a second
try os.tcsetattr(self.tty.in.context.handle, .FLUSH, termios);
errdefer os.tcsetattr(self.tty.in.context.handle, .FLUSH, self.original_termios) catch {};
try enterAltScreen();
errdefer exitAltScreen() catch unreachable;
try truncMode();
try overwriteMode();
try keypadMode();
try cursorTo(1, 1);
try flush();
}
// set the terminal input maximum wait time, in units of 1/10 second; zero means no wait
pub fn setTimeout(tenths: u8) ErrorSet.Termios!void {
const handle = state().tty.in.context.handle;
var termios = try os.tcgetattr(handle);
termios.cc[VTIME] = tenths;
try os.tcsetattr(handle, .FLUSH, termios);
}
/// generate terminal/job control signals with certain hotkeys
/// Ctrl-C, Ctrl-Z, Ctrl-S, etc
pub fn handleSignalInput() ErrorSet.Termios!void {
const handle = state().tty.in.context.handle;
var termios = try os.tcgetattr(handle);
termios.lflag |= system.ISIG;
try os.tcsetattr(handle, .FLUSH, termios);
}
/// treat terminal/job control hotkeys as normal input
/// Ctrl-C, Ctrl-Z, Ctrl-S, etc
pub fn ignoreSignalInput() ErrorSet.Termios!void {
const handle = state().tty.in.context.handle;
var termios = try os.tcgetattr(handle);
termios.lflag &= ~@as(os.system.tcflag_t, system.ISIG);
try os.tcsetattr(handle, .FLUSH, termios);
}
/// restore as much of the terminals's original state as possible
pub fn teardown() void {
const self = state();
exitAltScreen() catch {};
flush() catch {};
os.tcsetattr(self.tty.in.context.handle, .FLUSH, self.original_termios) catch {};
self.tty.in.context.close();
self.tty.out.context.close();
self.buffer.in.deinit();
self.buffer.out.deinit();
termState = null;
}
/// Reads the next message from the tty and parses it. Takes
/// special action for certain events.
pub fn nextEvent() (Allocator.Error || ErrorSet.TtyRead)!?Event {
const max_bytes = 4096;
var total_bytes: usize = 0;
const self = state();
while (true) {
try self.buffer.in.resize(total_bytes + max_bytes);
const bytes_read = try self.tty.in.context.read(
self.buffer.in.items[total_bytes .. max_bytes + total_bytes],
);
total_bytes += bytes_read;
if (bytes_read < max_bytes) {
self.buffer.in.items.len = total_bytes;
break;
}
}
const event = parseEvent();
//std.log.debug("event: {}", .{event});
return event;
}
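// Example (hedged sketch): typical lifecycle using the API above. It needs a
// real tty, so it is shown as a comment rather than a unit test; `alloc` is
// whatever Allocator the host supplies:
//
//     try setup(alloc);
//     defer teardown();
//     while (true) {
//         const event = (try nextEvent()) orelse continue;
//         switch (event) {
//             .escape => break,
//             else => {},
//         }
//     }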
// internals ///////////////////////////////////////////////////////////////////
const TermState = struct {
tty: struct {
in: InTty = undefined,
out: OutTty = undefined,
} = .{},
buffer: struct {
in: ArrayList(u8) = undefined,
out: ArrayList(u8) = undefined,
} = .{},
original_termios: os.system.termios = undefined,
};
var termState: ?TermState = null;
fn state() callconv(.Inline) *TermState {
if (std.debug.runtime_safety) {
if (termState) |*self| return self else @panic("terminal is not initialized");
} else return &termState.?;
}
fn parseEvent() ?Event {
const data = state().buffer.in.items;
const eql = std.mem.eql;
if (data.len == 0) return Event.tick;
if (eql(u8, data, "\x1B"))
return Event.escape
else if (eql(u8, data, "\x1B[A") or eql(u8, data, "\x1BOA"))
return Event.up
else if (eql(u8, data, "\x1B[B") or eql(u8, data, "\x1BOB"))
return Event.down
else if (eql(u8, data, "\x1B[C") or eql(u8, data, "\x1BOC"))
return Event.right
else if (eql(u8, data, "\x1B[D") or eql(u8, data, "\x1BOD"))
return Event.left
else
return Event{ .other = data };
}
// terminal mode setting functions. ////////////////////////////////////////////
/// sending text to the terminal at a specific offset overwrites preexisting text
/// in this mode.
fn overwriteMode() ErrorSet.BufWrite!void {
try sequence("4l");
}
/// sending text to the terminal at a specific offset pushes preexisting text to
/// the right of the line in this mode
fn insertMode() ErrorSet.BufWrite!void {
try sequence("4h");
}
/// when the cursor, or text being moved by insertion, reaches the last column on
/// the terminal in this mode, it moves to the next line
fn wrapMode() ErrorSet.BufWrite!void {
try sequence("?7h");
}
/// when the cursor reaches the last column on the terminal in this mode, it
/// stops, and further writing changes the contents of the final column in place.
/// when text being pushed by insertion reaches the final column, it is pushed
/// out of the terminal buffer and lost.
fn truncMode() ErrorSet.BufWrite!void {
try sequence("?7l");
}
/// not entirely sure what this does, but it is something about changing the
/// sequences generated by certain types of input, and is usually called when
/// initializing the terminal for 'non-canonical' input.
fn keypadMode() ErrorSet.BufWrite!void {
try sequence("?1h");
try send("\x1B=");
}
// saves the cursor and then sends a couple of versions of the alt-screen
// sequence.
// this allows you to restore the contents of the display by calling
// exitAltScreen() later when the program is exiting.
fn enterAltScreen() ErrorSet.BufWrite!void {
try sequence("s");
try sequence("?47h");
try sequence("?1049h");
}
// restores the cursor and then sends a couple of versions of the exit-altscreen
// sequence.
fn exitAltScreen() ErrorSet.BufWrite!void {
try sequence("u");
try sequence("?47l");
try sequence("?1049l");
}
// escape sequence construction and printing ///////////////////////////////////
const csi = "\x1B[";
fn sequence(comptime seq: []const u8) ErrorSet.BufWrite!void {
try send(csi ++ seq);
}
fn format(comptime template: []const u8, args: anytype) ErrorSet.BufWrite!void {
const self = state();
try self.buffer.out.writer().print(template, args);
}
fn formatSequence(comptime template: []const u8, args: anytype) ErrorSet.BufWrite!void {
try format(csi ++ template, args);
}
// TODO: these are not portable across architectures;
// they should be pulled in from C headers or
// defined per-architecture in linux/bits.
const VTIME = 5;
const VMIN = 6;
test "static anal" {
std.meta.refAllDecls(@This());
std.meta.refAllDecls(Event);
std.meta.refAllDecls(SGR);
} | src/prim.zig |
const w4 = @This();
const std = @import("std");
/// PLATFORM CONSTANTS
pub const CANVAS_SIZE = 160;
/// Helpers
pub const Vec2 = @import("std").meta.Vector(2, i32);
pub const x = 0;
pub const y = 1;
pub fn texLen(size: Vec2) usize {
return @intCast(usize, std.math.divCeil(i32, size[x] * size[y] * 2, 8) catch unreachable);
}
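// example assertion: an 8x8 texture at 2 bits per pixel occupies
// divCeil(8 * 8 * 2, 8) = 16 bytes
comptime {
if (texLen(.{ 8, 8 }) != 16) unreachable;
}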
pub const Mbl = enum { mut, cons };
pub fn Tex(comptime mbl: Mbl) type {
return struct {
// oh that's really annoying…
// ideally there would be a way to have a readonly Tex and a mutable Tex
// and the mutable should implicit cast to readonly
data: switch (mbl) {
.mut => [*]u8,
.cons => [*]const u8,
},
size: Vec2,
pub fn wrapSlice(slice: switch (mbl) {
.mut => []u8,
.cons => []const u8,
}, size: Vec2) Tex(mbl) {
if (slice.len != texLen(size)) {
unreachable;
}
return .{
.data = slice.ptr,
.size = size,
};
}
pub fn cons(tex: Tex(.mut)) Tex(.cons) {
return .{
.data = tex.data,
.size = tex.size,
};
}
pub fn blit(dest: Tex(.mut), dest_ul: Vec2, src: Tex(.cons), src_ul: Vec2, src_wh: Vec2, remap_colors: [4]u3, scale: Vec2) void {
for (range(@intCast(usize, src_wh[y]))) |_, y_usz| {
const yp = @intCast(i32, y_usz);
for (range(@intCast(usize, src_wh[x]))) |_, x_usz| {
const xp = @intCast(i32, x_usz);
const pos = Vec2{ xp, yp };
const value = remap_colors[src.get(src_ul + pos)];
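// remap values above the u2 range (4..7) mark the source color as
// transparent, so the destination pixel is left untouched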
if (value <= std.math.maxInt(u2)) {
dest.rect(pos * scale + dest_ul, scale, @intCast(u2, value));
}
}
}
}
pub fn rect(dest: Tex(.mut), ul: Vec2, wh: Vec2, color: u2) void {
for (range(std.math.lossyCast(usize, wh[y]))) |_, y_usz| {
const yp = @intCast(i32, y_usz);
for (range(std.math.lossyCast(usize, wh[x]))) |_, x_usz| {
const xp = @intCast(i32, x_usz);
dest.set(ul + Vec2{ xp, yp }, color);
}
}
}
pub fn get(tex: Tex(mbl), pos: Vec2) u2 {
if (@reduce(.Or, pos < w4.Vec2{ 0, 0 })) return 0;
if (@reduce(.Or, pos >= tex.size)) return 0;
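// pixels are packed four per byte, 2 bits each: the byte index is the
// pixel index divided by 4 and the bit offset is (pixel index mod 4) * 2.
// set() below uses the same layout.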
const index_unscaled = pos[w4.x] + (pos[w4.y] * tex.size[w4.x]);
const index = @intCast(usize, @divFloor(index_unscaled, 4));
const byte_idx = @intCast(u3, (@mod(index_unscaled, 4)) * 2);
return @truncate(u2, tex.data[index] >> byte_idx);
}
pub fn set(tex: Tex(.mut), pos: Vec2, value: u2) void {
if (@reduce(.Or, pos < w4.Vec2{ 0, 0 })) return;
if (@reduce(.Or, pos >= tex.size)) return;
const index_unscaled = pos[w4.x] + (pos[w4.y] * tex.size[w4.x]);
const index = @intCast(usize, @divFloor(index_unscaled, 4));
const byte_idx = @intCast(u3, (@mod(index_unscaled, 4)) * 2);
tex.data[index] &= ~(@as(u8, 0b11) << byte_idx);
tex.data[index] |= @as(u8, value) << byte_idx;
}
};
}
pub fn range(len: usize) []const void {
return @as([*]const void, &[_]void{})[0..len];
}
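// range() yields a zero-sized slice so older Zig can express a counted loop:
// `for (range(n)) |_, i| { ... }` iterates i from 0 to n - 1.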
// pub const Tex1BPP = struct {…};
// ┌───────────────────────────────────────────────────────────────────────────┐
// │ │
// │ Memory Addresses │
// │ │
// └───────────────────────────────────────────────────────────────────────────┘
pub const PALETTE: *[4]u32 = @intToPtr(*[4]u32, 0x04);
pub const DRAW_COLORS: *u16 = @intToPtr(*u16, 0x14);
pub const GAMEPAD1: *const Gamepad = @intToPtr(*const Gamepad, 0x16);
pub const GAMEPAD2: *const Gamepad = @intToPtr(*const Gamepad, 0x17);
pub const GAMEPAD3: *const Gamepad = @intToPtr(*const Gamepad, 0x18);
pub const GAMEPAD4: *const Gamepad = @intToPtr(*const Gamepad, 0x19);
pub const MOUSE: *const Mouse = @intToPtr(*const Mouse, 0x1a);
pub const SYSTEM_FLAGS: *SystemFlags = @intToPtr(*SystemFlags, 0x1f);
pub const FRAMEBUFFER: *[CANVAS_SIZE * CANVAS_SIZE / 4]u8 = @intToPtr(*[6400]u8, 0xA0);
pub const ctx = Tex(.mut){
.data = @intToPtr([*]u8, 0xA0), // apparently casting *[N]u8 to [*]u8 at comptime causes a compiler crash
.size = .{ CANVAS_SIZE, CANVAS_SIZE },
};
pub const Gamepad = packed struct {
button_1: bool = false,
button_2: bool = false,
_: u2 = 0,
button_left: bool = false,
button_right: bool = false,
button_up: bool = false,
button_down: bool = false,
comptime {
if (@sizeOf(@This()) != @sizeOf(u8)) unreachable;
}
pub fn format(value: @This(), comptime _: []const u8, _: @import("std").fmt.FormatOptions, writer: anytype) !void {
if (value.button_1) try writer.writeAll("1");
if (value.button_2) try writer.writeAll("2");
if (value.button_left) try writer.writeAll("<"); //"←");
if (value.button_right) try writer.writeAll(">");
if (value.button_up) try writer.writeAll("^");
if (value.button_down) try writer.writeAll("v");
}
};
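// Example: poll the first controller each frame via the memory-mapped register:
//     if (GAMEPAD1.button_1) jump();
// `jump` is a hypothetical game function.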
pub const Mouse = packed struct {
x: i16 = 0,
y: i16 = 0,
buttons: MouseButtons = .{},
pub fn pos(mouse: Mouse) Vec2 {
return .{ mouse.x, mouse.y };
}
comptime {
if (@sizeOf(@This()) != 5) unreachable;
}
};
pub const MouseButtons = packed struct {
left: bool = false,
right: bool = false,
middle: bool = false,
_: u5 = 0,
comptime {
if (@sizeOf(@This()) != @sizeOf(u8)) unreachable;
}
};
pub const SystemFlags = packed struct {
preserve_framebuffer: bool,
hide_gamepad_overlay: bool,
_: u6 = 0,
comptime {
if (@sizeOf(@This()) != @sizeOf(u8)) unreachable;
}
};
pub const SYSTEM_PRESERVE_FRAMEBUFFER: u8 = 1;
pub const SYSTEM_HIDE_GAMEPAD_OVERLAY: u8 = 2;
// ┌───────────────────────────────────────────────────────────────────────────┐
// │ │
// │ Drawing Functions │
// │ │
// └───────────────────────────────────────────────────────────────────────────┘
pub const externs = struct {
pub extern fn blit(sprite: [*]const u8, x: i32, y: i32, width: i32, height: i32, flags: u32) void;
pub extern fn blitSub(sprite: [*]const u8, x: i32, y: i32, width: i32, height: i32, src_x: i32, src_y: i32, stride: i32, flags: u32) void;
pub extern fn line(x1: i32, y1: i32, x2: i32, y2: i32) void;
pub extern fn oval(x: i32, y: i32, width: i32, height: i32) void;
pub extern fn rect(x: i32, y: i32, width: i32, height: i32) void;
pub extern fn textUtf8(strPtr: [*]const u8, strLen: usize, x: i32, y: i32) void;
/// Draws a vertical line
extern fn vline(x: i32, y: i32, len: u32) void;
/// Draws a horizontal line
extern fn hline(x: i32, y: i32, len: u32) void;
pub extern fn tone(frequency: u32, duration: u32, volume: u32, flags: u32) void;
};
/// Copies pixels to the framebuffer.
pub fn blit(sprite: []const u8, pos: Vec2, size: Vec2, flags: BlitFlags) void {
externs.blit(sprite.ptr, pos[x], pos[y], size[x], size[y], @bitCast(u32, flags));
}
/// Copies a subregion within a larger sprite atlas to the framebuffer.
pub fn blitSub(sprite: []const u8, pos: Vec2, size: Vec2, src: Vec2, stride: i32, flags: BlitFlags) void {
externs.blitSub(sprite.ptr, pos[x], pos[y], size[x], size[y], src[x], src[y], stride, @bitCast(u32, flags));
}
pub const BlitFlags = packed struct {
bpp: enum(u1) {
b1,
b2,
},
flip_x: bool = false,
flip_y: bool = false,
rotate: bool = false,
_: u28 = 0,
comptime {
if (@sizeOf(@This()) != @sizeOf(u32)) unreachable;
}
};
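// Example (hedged): draw an 8x8 1bpp sprite at (10, 10), mirrored horizontally:
//     blit(&smiley, .{ 10, 10 }, .{ 8, 8 }, .{ .bpp = .b1, .flip_x = true });
// `smiley` is a hypothetical [8]u8 bitmap, one row per byte.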
/// Draws a line between two points.
pub fn line(pos1: Vec2, pos2: Vec2) void {
externs.line(pos1[x], pos1[y], pos2[x], pos2[y]);
}
/// Draws an oval (or circle).
pub fn oval(ul: Vec2, size: Vec2) void {
externs.oval(ul[x], ul[y], size[x], size[y]);
}
/// Draws a rectangle.
pub fn rect(ul: Vec2, size: Vec2) void {
externs.rect(ul[x], ul[y], size[x], size[y]);
}
/// Draws text using the built-in system font.
pub fn text(str: []const u8, pos: Vec2) void {
externs.textUtf8(str.ptr, str.len, pos[x], pos[y]);
}
// ┌───────────────────────────────────────────────────────────────────────────┐
// │ │
// │ Sound Functions │
// │ │
// └───────────────────────────────────────────────────────────────────────────┘
/// Plays a sound tone.
pub fn tone(frequency: ToneFrequency, duration: ToneDuration, volume: u32, flags: ToneFlags) void {
return externs.tone(@bitCast(u32, frequency), @bitCast(u32, duration), volume, @bitCast(u8, flags));
}
pub const ToneFrequency = packed struct {
start: u16,
end: u16 = 0,
comptime {
if (@sizeOf(@This()) != @sizeOf(u32)) unreachable;
}
};
pub const ToneDuration = packed struct {
sustain: u8 = 0,
release: u8 = 0,
decay: u8 = 0,
attack: u8 = 0,
comptime {
if (@sizeOf(@This()) != @sizeOf(u32)) unreachable;
}
};
pub const ToneFlags = packed struct {
pub const Channel = enum(u2) {
pulse1,
pulse2,
triangle,
noise,
};
pub const Mode = enum(u2) {
p12_5,
p25,
p50,
p75,
};
channel: Channel,
mode: Mode = .p12_5,
_: u4 = 0,
comptime {
if (@sizeOf(@This()) != @sizeOf(u8)) unreachable;
}
};
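// Example (hedged): play a 440 Hz tone on the first pulse channel for 60
// frames at volume 80:
//     tone(.{ .start = 440 }, .{ .sustain = 60 }, 80, .{ .channel = .pulse1 });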
// ┌───────────────────────────────────────────────────────────────────────────┐
// │ │
// │ Storage Functions │
// │ │
// └───────────────────────────────────────────────────────────────────────────┘
/// Reads up to `size` bytes from persistent storage into the pointer `dest`.
pub extern fn diskr(dest: [*]u8, size: u32) u32;
/// Writes up to `size` bytes from the pointer `src` into persistent storage.
pub extern fn diskw(src: [*]const u8, size: u32) u32;
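// Example (hedged): round-trip a small save blob; both calls return the
// number of bytes actually transferred:
//     var save = [_]u8{ 1, 2, 3, 4 };
//     _ = diskw(&save, save.len);
//     _ = diskr(&save, save.len);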
// ┌───────────────────────────────────────────────────────────────────────────┐
// │ │
// │ Other Functions │
// │ │
// └───────────────────────────────────────────────────────────────────────────┘
/// Prints a message to the debug console.
/// Disabled in release builds.
pub fn trace(comptime fmt: []const u8, args: anytype) void {
if (@import("builtin").mode != .Debug) @compileError("trace not allowed in release builds.");
// stack size is [8192]u8
var buffer: [100]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buffer);
const writer = fbs.writer();
writer.print(fmt, args) catch {
const err_msg = switch (@import("builtin").mode) {
.Debug => "[trace err] " ++ fmt,
else => "[trace err]", // max 100 bytes in trace message.
};
return traceUtf8(err_msg, err_msg.len);
};
traceUtf8(&buffer, fbs.pos);
}
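// Example: trace("frame {d}", .{frame_number}); // debug builds only; output
// that overflows the 100-byte buffer falls back to a "[trace err]" marker.
// `frame_number` is a hypothetical counter.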
extern fn traceUtf8(str_ptr: [*]const u8, str_len: usize) void;
/// Use with caution, as there's no compile-time type checking.
///
/// * %c, %d, and %x expect 32-bit integers.
/// * %f expects 64-bit floats.
/// * %s expects a *zero-terminated* string pointer.
///
/// See https://github.com/aduros/wasm4/issues/244 for discussion and type-safe
/// alternatives.
pub extern fn tracef(x: [*:0]const u8, ...) void; | src/wasm4.zig |
const std = @import("std");
const debug = std.debug;
const os = std.os;
const mem = std.mem;
const fs = std.fs;
const File = fs.File;
const Dir = fs.Dir;
const fmt = std.fmt;
const BUFFERSIZE = 8192 * 2;
pub const ProcessInformation = struct {
pid: i32 = 0,
// 16k for the command-line buffer; is it enough?
commandlinebuffer: [BUFFERSIZE]u8 = [_]u8{'\x00'} ** BUFFERSIZE,
commandlinebuffer_size: u16 = 0,
const Self = @This();
// iterator on the command line arguments
const Iterator = struct {
inner: *const Self,
currentIndex: u16 = 0,
const SelfIterator = @This();
pub fn next(self: *SelfIterator) ?[]const u8 {
if (self.currentIndex >= self.inner.commandlinebuffer_size) {
return null;
}
const start = self.currentIndex;
// seek for next 0 ending or end of commandline buffer_size
while (self.currentIndex < self.inner.commandlinebuffer_size and self.inner.commandlinebuffer[self.currentIndex] != '\x00') {
self.currentIndex = self.currentIndex + 1;
}
// move to next after returning the element slice
defer self.currentIndex = self.currentIndex + 1;
return self.inner.commandlinebuffer[start..self.currentIndex];
}
};
pub fn iterator(processInfo: *Self) Iterator {
return Iterator{ .inner = processInfo };
}
};
// get process information for a specific PID.
// returns false if the process does not exist;
// otherwise returns true and populates processInfo with the command-line options
pub fn getProcessInformations(pid: i32, processInfo: *ProcessInformation) !bool {
var buffer = [_]u8{'\x00'} ** 8192;
const options = Dir.OpenDirOptions{
.access_sub_paths = true,
.iterate = true,
};
var procDir = try fs.cwd().openDir("/proc", options);
defer procDir.close();
const r = try fmt.bufPrint(&buffer, "{}", .{pid});
var subprocDir: Dir = procDir.openDir(r, options) catch {
return false;
};
defer subprocDir.close();
var commandLineFile: File = try subprocDir.openFile("cmdline", File.OpenFlags{});
defer commandLineFile.close();
const readSize = try commandLineFile.pread(processInfo.commandlinebuffer[0..], 0);
processInfo.commandlinebuffer_size = @intCast(u16, readSize);
processInfo.*.pid = pid;
return true;
}
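// Example (hedged): inspect a single PID directly:
//     var info = ProcessInformation{};
//     if (try getProcessInformations(1, &info)) {
//         var it = info.iterator();
//         while (it.next()) |arg| debug.warn(" {}\n", .{arg});
//     }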
// test if buffer contains only digits
fn isAllNumeric(buffer: []const u8) bool {
for (buffer) |c| {
if (c > '9' or c < '0') {
return false;
}
}
return true;
}
// Process browsing
//
const ProcessInformationCallback = fn (processInformation: *ProcessInformation) void;
// lists processes and grabs their command-line arguments;
// the given callback is invoked for each process that has a non-empty command line
pub fn listProcesses(callback: ProcessInformationCallback) !void {
const options = Dir.OpenDirOptions{
.access_sub_paths = true,
.iterate = true,
};
var procDir = try fs.cwd().openDir("/proc", options);
defer procDir.close();
var dirIterator = procDir.iterate();
while (try dirIterator.next()) |f| {
if (f.kind == File.Kind.File) {
continue;
}
if (!isAllNumeric(f.name)) {
continue;
}
const pid = try fmt.parseInt(i32, f.name, 10);
var pi = ProcessInformation{};
const successGetInformations = try getProcessInformations(pid, &pi);
if (successGetInformations and pi.commandlinebuffer_size > 0) {
callback(&pi);
// debug.warn(" {}: {} \n", .{ pid, pi.commandlinebuffer[0..pi.commandlinebuffer_size] });
}
// try opening the commandline file
}
}
fn testCallback(processInformation: *ProcessInformation) void {
debug.warn("processinformation : {}", .{processInformation});
// dump the command-line buffer
var it = processInformation.iterator();
while (it.next()) |i| {
debug.warn(" {}\n", .{i});
}
}
test "check existing process" {
try listProcesses(testCallback);
} | processlib.zig |
const std = @import("std");
const assert = std.debug.assert;
const meta = std.meta;
const builtin = std.builtin;
usingnamespace @cImport({
@cInclude("stdio.h");
@cInclude("string.h");
@cInclude("unistd.h");
@cInclude("time.h");
@cInclude("errno.h");
@cInclude("stdintfix.h"); // NB: Required as zig is unable to process some macros
@cInclude("GL/gl.h");
@cInclude("GL/glx.h");
@cInclude("GL/glext.h");
@cInclude("bgfx/c99/bgfx.h");
// @cInclude("bgfx/platform.h");
});
const sdl = @cImport({
@cInclude("SDL2/SDL.h");
@cInclude("SDL2/SDL_syswm.h");
});
fn sdlSetWindow(window: *sdl.SDL_Window) !void {
var wmi: sdl.SDL_SysWMinfo = undefined;
wmi.version.major = sdl.SDL_MAJOR_VERSION;
wmi.version.minor = sdl.SDL_MINOR_VERSION;
wmi.version.patch = sdl.SDL_PATCHLEVEL;
if (sdl.SDL_GetWindowWMInfo(window, &wmi) == .SDL_FALSE) {
return error.SDL_FAILED_INIT;
}
var pd = std.mem.zeroes(bgfx_platform_data_t);
if (builtin.os.tag == .linux) {
pd.ndt = wmi.info.x11.display;
pd.nwh = meta.cast(*c_void, wmi.info.x11.window);
}
if (builtin.os.tag == .freebsd) {
pd.ndt = wmi.info.x11.display;
pd.nwh = meta.cast(*c_void, wmi.info.x11.window);
}
if (builtin.os.tag == .macos) {
pd.ndt = NULL;
pd.nwh = wmi.info.cocoa.window;
}
if (builtin.os.tag == .windows) {
pd.ndt = NULL;
pd.nwh = wmi.info.win.window;
}
//if (builtin.os.tag == .steamlink) {
// pd.ndt = wmi.info.vivante.display;
// pd.nwh = wmi.info.vivante.window;
//}
pd.context = NULL;
pd.backBuffer = NULL;
pd.backBufferDS = NULL;
bgfx_set_platform_data(&pd);
}
// COPIED FROM https://bedroomcoders.co.uk/using-bgfx-from-c/
const PosColorVertex = packed struct {
x: f32,
y: f32,
z: f32,
abgr: c_uint
};
const cubeVertices = [_]PosColorVertex{
.{ .x = -1.0, .y = 1.0, .z = 1.0, .abgr = 0xff000000 },
.{ .x = 1.0, .y = 1.0, .z = 1.0, .abgr = 0xff0000ff },
.{ .x = -1.0, .y = -1.0, .z = 1.0, .abgr = 0xff00ff00 },
.{ .x = 1.0, .y = -1.0, .z = 1.0, .abgr = 0xff00ffff },
.{ .x = -1.0, .y = 1.0, .z = -1.0, .abgr = 0xffff0000 },
.{ .x = 1.0, .y = 1.0, .z = -1.0, .abgr = 0xffff00ff },
.{ .x = -1.0, .y = -1.0, .z = -1.0, .abgr = 0xffffff00 },
.{ .x = 1.0, .y = -1.0, .z = -1.0, .abgr = 0xffffffff },
};
const cubeTriList = [_]c_uint
{
0, 1, 2,
1, 3, 2,
4, 6, 5,
5, 6, 7,
0, 2, 4,
4, 2, 6,
1, 5, 3,
5, 7, 3,
0, 4, 1,
4, 5, 1,
2, 3, 6,
6, 3, 7,
};
// export fn loadShader(FILENAME:char *)
// {
// const char* shaderPath = "???";
// //dx11/ dx9/ essl/ glsl/ metal/ pssl/ spirv/
// bgfx_shader_handle_t invalid = BGFX_INVALID_HANDLE;
// switch(bgfx_get_renderer_type()) {
// case BGFX_RENDERER_TYPE_NOOP:
// case BGFX_RENDERER_TYPE_DIRECT3D9: shaderPath = "shaders/dx9/"; break;
// case BGFX_RENDERER_TYPE_DIRECT3D11:
// case BGFX_RENDERER_TYPE_DIRECT3D12: shaderPath = "shaders/dx11/"; break;
// case BGFX_RENDERER_TYPE_GNM: shaderPath = "shaders/pssl/"; break;
// case BGFX_RENDERER_TYPE_METAL: shaderPath = "shaders/metal/"; break;
// case BGFX_RENDERER_TYPE_OPENGL: shaderPath = "shaders/glsl/"; break;
// case BGFX_RENDERER_TYPE_OPENGLES: shaderPath = "shaders/essl/"; break;
// case BGFX_RENDERER_TYPE_VULKAN: shaderPath = "shaders/spirv/"; break;
// case BGFX_RENDERER_TYPE_NVN:
// case BGFX_RENDERER_TYPE_WEBGPU:
// case BGFX_RENDERER_TYPE_COUNT: return invalid; // count included to keep compiler warnings happy
// }
// size_t shaderLen = strlen(shaderPath);
// size_t fileLen = strlen(FILENAME);
// char *filePath = (char *)malloc(shaderLen + fileLen + 1);
// memcpy(filePath, shaderPath, shaderLen);
// memcpy(&filePath[shaderLen], FILENAME, fileLen);
// filePath[shaderLen+fileLen] = 0; // properly null terminate
// FILE *file = fopen(filePath, "rb");
// if (!file) {
// return invalid;
// }
// fseek(file, 0, SEEK_END);
// long fileSize = ftell(file);
// fseek(file, 0, SEEK_SET);
// const bgfx_memory_t *mem = bgfx_alloc(fileSize + 1);
// fread(mem->data, 1, fileSize, file);
// mem->data[mem->size - 1] = '\0';
// fclose(file);
// return bgfx_create_shader(mem);
// }
pub fn main() !void {
const out = std.io.getStdOut().writer();
try out.print("Hello, {s}!\n", .{"world"});
_ = sdl.SDL_Init(0);
defer sdl.SDL_Quit();
const window = sdl.SDL_CreateWindow("bgfx", sdl.SDL_WINDOWPOS_UNDEFINED, sdl.SDL_WINDOWPOS_UNDEFINED, 800, 600, sdl.SDL_WINDOW_SHOWN | sdl.SDL_WINDOW_RESIZABLE).?;
defer sdl.SDL_DestroyWindow(window);
try sdlSetWindow(window);
var in = std.mem.zeroes(bgfx_init_t);
in.type = bgfx_renderer_type.BGFX_RENDERER_TYPE_COUNT; // Automatically choose a renderer.
in.resolution.width = 800;
in.resolution.height = 600;
in.resolution.reset = BGFX_RESET_VSYNC;
var success = bgfx_init(&in);
defer bgfx_shutdown();
assert(success);
bgfx_set_debug(BGFX_DEBUG_TEXT);
bgfx_set_view_clear(0, BGFX_CLEAR_COLOR | BGFX_CLEAR_DEPTH, 0x443355FF, 1.0, 0);
bgfx_set_view_rect(0, 0, 0, 800, 600);
// const pcvDecl = bgfx_vertex_decl;
// pcvDecl.begin()
// .add(bgfx::Attrib::Position, 3, bgfx::AttribType::Float)
// .add(bgfx::Attrib::Color0, 4, bgfx::AttribType::Uint8, true)
// .end();
// bgfx::VertexBufferHandle vbh = bgfx::createVertexBuffer(bgfx::makeRef(cubeVertices, sizeof(cubeVertices)), pcvDecl);
// bgfx::IndexBufferHandle ibh = bgfx::createIndexBuffer(bgfx::makeRef(cubeTriList, sizeof(cubeTriList)));
// unsigned int counter = 0;
var frame_number: u64 = 0;
gameloop: while (true) {
var event: sdl.SDL_Event = undefined;
var should_exit = false;
while (sdl.SDL_PollEvent(&event) == 1) {
switch (event.type) {
sdl.SDL_QUIT => should_exit = true,
sdl.SDL_WINDOWEVENT => {
const wev = &event.window;
switch (wev.event) {
sdl.SDL_WINDOWEVENT_RESIZED, sdl.SDL_WINDOWEVENT_SIZE_CHANGED => {},
sdl.SDL_WINDOWEVENT_CLOSE => should_exit = true,
else => {},
}
},
else => {},
}
}
if (should_exit) break :gameloop;
bgfx_set_view_rect(0, 0, 0, 800, 600);
bgfx_touch(0);
bgfx_dbg_text_clear(0, false);
bgfx_dbg_text_printf(0, 1, 0x4f, "Frame#:%d", frame_number);
frame_number = bgfx_frame(false);
}
} | src/main.zig |
//--------------------------------------------------------------------------------
// Section: Types (21)
//--------------------------------------------------------------------------------
const CLSID_GameExplorer_Value = @import("zig.zig").Guid.initString("9a5ea990-3034-4d6f-9128-01f3c61022bc");
pub const CLSID_GameExplorer = &CLSID_GameExplorer_Value;
const CLSID_GameStatistics_Value = @import("zig.zig").Guid.initString("dbc85a2c-c0dc-4961-b6e2-d28b62c11ad4");
pub const CLSID_GameStatistics = &CLSID_GameStatistics_Value;
pub const GAME_INSTALL_SCOPE = enum(i32) {
NOT_INSTALLED = 1,
CURRENT_USER = 2,
ALL_USERS = 3,
};
pub const GIS_NOT_INSTALLED = GAME_INSTALL_SCOPE.NOT_INSTALLED;
pub const GIS_CURRENT_USER = GAME_INSTALL_SCOPE.CURRENT_USER;
pub const GIS_ALL_USERS = GAME_INSTALL_SCOPE.ALL_USERS;
const IID_IGameExplorer_Value = @import("zig.zig").Guid.initString("e7b2fb72-d728-49b3-a5f2-18ebf5f1349e");
pub const IID_IGameExplorer = &IID_IGameExplorer_Value;
pub const IGameExplorer = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
AddGame: fn(
self: *const IGameExplorer,
bstrGDFBinaryPath: ?BSTR,
bstrGameInstallDirectory: ?BSTR,
installScope: GAME_INSTALL_SCOPE,
pguidInstanceID: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RemoveGame: fn(
self: *const IGameExplorer,
guidInstanceID: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
UpdateGame: fn(
self: *const IGameExplorer,
guidInstanceID: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
VerifyAccess: fn(
self: *const IGameExplorer,
bstrGDFBinaryPath: ?BSTR,
pfHasAccess: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameExplorer_AddGame(self: *const T, bstrGDFBinaryPath: ?BSTR, bstrGameInstallDirectory: ?BSTR, installScope: GAME_INSTALL_SCOPE, pguidInstanceID: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameExplorer.VTable, self.vtable).AddGame(@ptrCast(*const IGameExplorer, self), bstrGDFBinaryPath, bstrGameInstallDirectory, installScope, pguidInstanceID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameExplorer_RemoveGame(self: *const T, guidInstanceID: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameExplorer.VTable, self.vtable).RemoveGame(@ptrCast(*const IGameExplorer, self), guidInstanceID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameExplorer_UpdateGame(self: *const T, guidInstanceID: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameExplorer.VTable, self.vtable).UpdateGame(@ptrCast(*const IGameExplorer, self), guidInstanceID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameExplorer_VerifyAccess(self: *const T, bstrGDFBinaryPath: ?BSTR, pfHasAccess: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameExplorer.VTable, self.vtable).VerifyAccess(@ptrCast(*const IGameExplorer, self), bstrGDFBinaryPath, pfHasAccess);
}
};}
pub usingnamespace MethodMixin(@This());
};
pub const GAMESTATS_OPEN_TYPE = enum(i32) {
RCREATE = 0,
NLY = 1,
};
pub const GAMESTATS_OPEN_OPENORCREATE = GAMESTATS_OPEN_TYPE.RCREATE;
pub const GAMESTATS_OPEN_OPENONLY = GAMESTATS_OPEN_TYPE.NLY;
pub const GAMESTATS_OPEN_RESULT = enum(i32) {
CREATED = 0,
OPENED = 1,
};
pub const GAMESTATS_OPEN_CREATED = GAMESTATS_OPEN_RESULT.CREATED;
pub const GAMESTATS_OPEN_OPENED = GAMESTATS_OPEN_RESULT.OPENED;
const IID_IGameStatistics_Value = @import("zig.zig").Guid.initString("3887c9ca-04a0-42ae-bc4c-5fa6c7721145");
pub const IID_IGameStatistics = &IID_IGameStatistics_Value;
pub const IGameStatistics = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetMaxCategoryLength: fn(
self: *const IGameStatistics,
cch: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMaxNameLength: fn(
self: *const IGameStatistics,
cch: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMaxValueLength: fn(
self: *const IGameStatistics,
cch: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMaxCategories: fn(
self: *const IGameStatistics,
pMax: ?*u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMaxStatsPerCategory: fn(
self: *const IGameStatistics,
pMax: ?*u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetCategoryTitle: fn(
self: *const IGameStatistics,
categoryIndex: u16,
title: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCategoryTitle: fn(
self: *const IGameStatistics,
categoryIndex: u16,
pTitle: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetStatistic: fn(
self: *const IGameStatistics,
categoryIndex: u16,
statIndex: u16,
pName: ?*?PWSTR,
pValue: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetStatistic: fn(
self: *const IGameStatistics,
categoryIndex: u16,
statIndex: u16,
name: ?[*:0]const u16,
value: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Save: fn(
self: *const IGameStatistics,
trackChanges: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetLastPlayedCategory: fn(
self: *const IGameStatistics,
categoryIndex: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetLastPlayedCategory: fn(
self: *const IGameStatistics,
pCategoryIndex: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatistics_GetMaxCategoryLength(self: *const T, cch: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatistics.VTable, self.vtable).GetMaxCategoryLength(@ptrCast(*const IGameStatistics, self), cch);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatistics_GetMaxNameLength(self: *const T, cch: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatistics.VTable, self.vtable).GetMaxNameLength(@ptrCast(*const IGameStatistics, self), cch);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatistics_GetMaxValueLength(self: *const T, cch: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatistics.VTable, self.vtable).GetMaxValueLength(@ptrCast(*const IGameStatistics, self), cch);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatistics_GetMaxCategories(self: *const T, pMax: ?*u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatistics.VTable, self.vtable).GetMaxCategories(@ptrCast(*const IGameStatistics, self), pMax);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatistics_GetMaxStatsPerCategory(self: *const T, pMax: ?*u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatistics.VTable, self.vtable).GetMaxStatsPerCategory(@ptrCast(*const IGameStatistics, self), pMax);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatistics_SetCategoryTitle(self: *const T, categoryIndex: u16, title: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatistics.VTable, self.vtable).SetCategoryTitle(@ptrCast(*const IGameStatistics, self), categoryIndex, title);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatistics_GetCategoryTitle(self: *const T, categoryIndex: u16, pTitle: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatistics.VTable, self.vtable).GetCategoryTitle(@ptrCast(*const IGameStatistics, self), categoryIndex, pTitle);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatistics_GetStatistic(self: *const T, categoryIndex: u16, statIndex: u16, pName: ?*?PWSTR, pValue: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatistics.VTable, self.vtable).GetStatistic(@ptrCast(*const IGameStatistics, self), categoryIndex, statIndex, pName, pValue);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatistics_SetStatistic(self: *const T, categoryIndex: u16, statIndex: u16, name: ?[*:0]const u16, value: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatistics.VTable, self.vtable).SetStatistic(@ptrCast(*const IGameStatistics, self), categoryIndex, statIndex, name, value);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatistics_Save(self: *const T, trackChanges: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatistics.VTable, self.vtable).Save(@ptrCast(*const IGameStatistics, self), trackChanges);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatistics_SetLastPlayedCategory(self: *const T, categoryIndex: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatistics.VTable, self.vtable).SetLastPlayedCategory(@ptrCast(*const IGameStatistics, self), categoryIndex);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatistics_GetLastPlayedCategory(self: *const T, pCategoryIndex: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatistics.VTable, self.vtable).GetLastPlayedCategory(@ptrCast(*const IGameStatistics, self), pCategoryIndex);
}
};}
pub usingnamespace MethodMixin(@This());
};
const IID_IGameStatisticsMgr_Value = @import("zig.zig").Guid.initString("aff3ea11-e70e-407d-95dd-35e612c41ce2");
pub const IID_IGameStatisticsMgr = &IID_IGameStatisticsMgr_Value;
pub const IGameStatisticsMgr = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetGameStatistics: fn(
self: *const IGameStatisticsMgr,
GDFBinaryPath: ?[*:0]const u16,
openType: GAMESTATS_OPEN_TYPE,
pOpenResult: ?*GAMESTATS_OPEN_RESULT,
ppiStats: ?*?*IGameStatistics,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RemoveGameStatistics: fn(
self: *const IGameStatisticsMgr,
GDFBinaryPath: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatisticsMgr_GetGameStatistics(self: *const T, GDFBinaryPath: ?[*:0]const u16, openType: GAMESTATS_OPEN_TYPE, pOpenResult: ?*GAMESTATS_OPEN_RESULT, ppiStats: ?*?*IGameStatistics) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatisticsMgr.VTable, self.vtable).GetGameStatistics(@ptrCast(*const IGameStatisticsMgr, self), GDFBinaryPath, openType, pOpenResult, ppiStats);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameStatisticsMgr_RemoveGameStatistics(self: *const T, GDFBinaryPath: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameStatisticsMgr.VTable, self.vtable).RemoveGameStatistics(@ptrCast(*const IGameStatisticsMgr, self), GDFBinaryPath);
}
};}
pub usingnamespace MethodMixin(@This());
};
const IID_IGameExplorer2_Value = @import("zig.zig").Guid.initString("86874aa7-a1ed-450d-a7eb-b89e20b2fff3");
pub const IID_IGameExplorer2 = &IID_IGameExplorer2_Value;
pub const IGameExplorer2 = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
InstallGame: fn(
self: *const IGameExplorer2,
binaryGDFPath: ?[*:0]const u16,
installDirectory: ?[*:0]const u16,
installScope: GAME_INSTALL_SCOPE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
UninstallGame: fn(
self: *const IGameExplorer2,
binaryGDFPath: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CheckAccess: fn(
self: *const IGameExplorer2,
binaryGDFPath: ?[*:0]const u16,
pHasAccess: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameExplorer2_InstallGame(self: *const T, binaryGDFPath: ?[*:0]const u16, installDirectory: ?[*:0]const u16, installScope: GAME_INSTALL_SCOPE) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameExplorer2.VTable, self.vtable).InstallGame(@ptrCast(*const IGameExplorer2, self), binaryGDFPath, installDirectory, installScope);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameExplorer2_UninstallGame(self: *const T, binaryGDFPath: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameExplorer2.VTable, self.vtable).UninstallGame(@ptrCast(*const IGameExplorer2, self), binaryGDFPath);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGameExplorer2_CheckAccess(self: *const T, binaryGDFPath: ?[*:0]const u16, pHasAccess: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IGameExplorer2.VTable, self.vtable).CheckAccess(@ptrCast(*const IGameExplorer2, self), binaryGDFPath, pHasAccess);
}
};}
pub usingnamespace MethodMixin(@This());
};
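// Example (hedged sketch): given an `explorer: *IGameExplorer2` obtained via
// COM activation (e.g. CoCreateInstance with CLSID_GameExplorer, not shown
// here), the mixin methods are called directly:
//     var has_access: BOOL = undefined;
//     const hr = explorer.IGameExplorer2_CheckAccess(gdf_path, &has_access);
// `gdf_path` is a hypothetical null-terminated UTF-16 path to the GDF binary.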
pub const GAMING_DEVICE_VENDOR_ID = enum(i32) {
NONE = 0,
MICROSOFT = -1024700366,
};
pub const GAMING_DEVICE_VENDOR_ID_NONE = GAMING_DEVICE_VENDOR_ID.NONE;
pub const GAMING_DEVICE_VENDOR_ID_MICROSOFT = GAMING_DEVICE_VENDOR_ID.MICROSOFT;
pub const GAMING_DEVICE_DEVICE_ID = enum(i32) {
NONE = 0,
XBOX_ONE = 1988865574,
XBOX_ONE_S = 712204761,
XBOX_ONE_X = 1523980231,
XBOX_ONE_X_DEVKIT = 284675555,
};
pub const GAMING_DEVICE_DEVICE_ID_NONE = GAMING_DEVICE_DEVICE_ID.NONE;
pub const GAMING_DEVICE_DEVICE_ID_XBOX_ONE = GAMING_DEVICE_DEVICE_ID.XBOX_ONE;
pub const GAMING_DEVICE_DEVICE_ID_XBOX_ONE_S = GAMING_DEVICE_DEVICE_ID.XBOX_ONE_S;
pub const GAMING_DEVICE_DEVICE_ID_XBOX_ONE_X = GAMING_DEVICE_DEVICE_ID.XBOX_ONE_X;
pub const GAMING_DEVICE_DEVICE_ID_XBOX_ONE_X_DEVKIT = GAMING_DEVICE_DEVICE_ID.XBOX_ONE_X_DEVKIT;
pub const GAMING_DEVICE_MODEL_INFORMATION = extern struct {
vendorId: GAMING_DEVICE_VENDOR_ID,
deviceId: GAMING_DEVICE_DEVICE_ID,
};
pub const GameUICompletionRoutine = fn(
returnCode: HRESULT,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) void;
pub const PlayerPickerUICompletionRoutine = fn(
returnCode: HRESULT,
context: ?*anyopaque,
selectedXuids: [*]const ?HSTRING,
selectedXuidsCount: usize,
) callconv(@import("std").os.windows.WINAPI) void;
pub const KnownGamingPrivileges = enum(i32) {
BROADCAST = 190,
VIEW_FRIENDS_LIST = 197,
GAME_DVR = 198,
SHARE_KINECT_CONTENT = 199,
MULTIPLAYER_PARTIES = 203,
COMMUNICATION_VOICE_INGAME = 205,
COMMUNICATION_VOICE_SKYPE = 206,
CLOUD_GAMING_MANAGE_SESSION = 207,
CLOUD_GAMING_JOIN_SESSION = 208,
CLOUD_SAVED_GAMES = 209,
SHARE_CONTENT = 211,
PREMIUM_CONTENT = 214,
SUBSCRIPTION_CONTENT = 219,
SOCIAL_NETWORK_SHARING = 220,
PREMIUM_VIDEO = 224,
VIDEO_COMMUNICATIONS = 235,
PURCHASE_CONTENT = 245,
USER_CREATED_CONTENT = 247,
PROFILE_VIEWING = 249,
COMMUNICATIONS = 252,
MULTIPLAYER_SESSIONS = 254,
ADD_FRIEND = 255,
};
pub const XPRIVILEGE_BROADCAST = KnownGamingPrivileges.BROADCAST;
pub const XPRIVILEGE_VIEW_FRIENDS_LIST = KnownGamingPrivileges.VIEW_FRIENDS_LIST;
pub const XPRIVILEGE_GAME_DVR = KnownGamingPrivileges.GAME_DVR;
pub const XPRIVILEGE_SHARE_KINECT_CONTENT = KnownGamingPrivileges.SHARE_KINECT_CONTENT;
pub const XPRIVILEGE_MULTIPLAYER_PARTIES = KnownGamingPrivileges.MULTIPLAYER_PARTIES;
pub const XPRIVILEGE_COMMUNICATION_VOICE_INGAME = KnownGamingPrivileges.COMMUNICATION_VOICE_INGAME;
pub const XPRIVILEGE_COMMUNICATION_VOICE_SKYPE = KnownGamingPrivileges.COMMUNICATION_VOICE_SKYPE;
pub const XPRIVILEGE_CLOUD_GAMING_MANAGE_SESSION = KnownGamingPrivileges.CLOUD_GAMING_MANAGE_SESSION;
pub const XPRIVILEGE_CLOUD_GAMING_JOIN_SESSION = KnownGamingPrivileges.CLOUD_GAMING_JOIN_SESSION;
pub const XPRIVILEGE_CLOUD_SAVED_GAMES = KnownGamingPrivileges.CLOUD_SAVED_GAMES;
pub const XPRIVILEGE_SHARE_CONTENT = KnownGamingPrivileges.SHARE_CONTENT;
pub const XPRIVILEGE_PREMIUM_CONTENT = KnownGamingPrivileges.PREMIUM_CONTENT;
pub const XPRIVILEGE_SUBSCRIPTION_CONTENT = KnownGamingPrivileges.SUBSCRIPTION_CONTENT;
pub const XPRIVILEGE_SOCIAL_NETWORK_SHARING = KnownGamingPrivileges.SOCIAL_NETWORK_SHARING;
pub const XPRIVILEGE_PREMIUM_VIDEO = KnownGamingPrivileges.PREMIUM_VIDEO;
pub const XPRIVILEGE_VIDEO_COMMUNICATIONS = KnownGamingPrivileges.VIDEO_COMMUNICATIONS;
pub const XPRIVILEGE_PURCHASE_CONTENT = KnownGamingPrivileges.PURCHASE_CONTENT;
pub const XPRIVILEGE_USER_CREATED_CONTENT = KnownGamingPrivileges.USER_CREATED_CONTENT;
pub const XPRIVILEGE_PROFILE_VIEWING = KnownGamingPrivileges.PROFILE_VIEWING;
pub const XPRIVILEGE_COMMUNICATIONS = KnownGamingPrivileges.COMMUNICATIONS;
pub const XPRIVILEGE_MULTIPLAYER_SESSIONS = KnownGamingPrivileges.MULTIPLAYER_SESSIONS;
pub const XPRIVILEGE_ADD_FRIEND = KnownGamingPrivileges.ADD_FRIEND;
const CLSID_XblIdpAuthManager_Value = @import("zig.zig").Guid.initString("ce23534b-56d8-4978-86a2-7ee570640468");
pub const CLSID_XblIdpAuthManager = &CLSID_XblIdpAuthManager_Value;
const CLSID_XblIdpAuthTokenResult_Value = @import("zig.zig").Guid.initString("9f493441-744a-410c-ae2b-9a22f7c7731f");
pub const CLSID_XblIdpAuthTokenResult = &CLSID_XblIdpAuthTokenResult_Value;
pub const XBL_IDP_AUTH_TOKEN_STATUS = enum(i32) {
SUCCESS = 0,
OFFLINE_SUCCESS = 1,
NO_ACCOUNT_SET = 2,
LOAD_MSA_ACCOUNT_FAILED = 3,
XBOX_VETO = 4,
MSA_INTERRUPT = 5,
OFFLINE_NO_CONSENT = 6,
VIEW_NOT_SET = 7,
UNKNOWN = -1,
};
pub const XBL_IDP_AUTH_TOKEN_STATUS_SUCCESS = XBL_IDP_AUTH_TOKEN_STATUS.SUCCESS;
pub const XBL_IDP_AUTH_TOKEN_STATUS_OFFLINE_SUCCESS = XBL_IDP_AUTH_TOKEN_STATUS.OFFLINE_SUCCESS;
pub const XBL_IDP_AUTH_TOKEN_STATUS_NO_ACCOUNT_SET = XBL_IDP_AUTH_TOKEN_STATUS.NO_ACCOUNT_SET;
pub const XBL_IDP_AUTH_TOKEN_STATUS_LOAD_MSA_ACCOUNT_FAILED = XBL_IDP_AUTH_TOKEN_STATUS.LOAD_MSA_ACCOUNT_FAILED;
pub const XBL_IDP_AUTH_TOKEN_STATUS_XBOX_VETO = XBL_IDP_AUTH_TOKEN_STATUS.XBOX_VETO;
pub const XBL_IDP_AUTH_TOKEN_STATUS_MSA_INTERRUPT = XBL_IDP_AUTH_TOKEN_STATUS.MSA_INTERRUPT;
pub const XBL_IDP_AUTH_TOKEN_STATUS_OFFLINE_NO_CONSENT = XBL_IDP_AUTH_TOKEN_STATUS.OFFLINE_NO_CONSENT;
pub const XBL_IDP_AUTH_TOKEN_STATUS_VIEW_NOT_SET = XBL_IDP_AUTH_TOKEN_STATUS.VIEW_NOT_SET;
pub const XBL_IDP_AUTH_TOKEN_STATUS_UNKNOWN = XBL_IDP_AUTH_TOKEN_STATUS.UNKNOWN;
const IID_IXblIdpAuthManager_Value = @import("zig.zig").Guid.initString("eb5ddb08-8bbf-449b-ac21-b02ddeb3b136");
pub const IID_IXblIdpAuthManager = &IID_IXblIdpAuthManager_Value;
pub const IXblIdpAuthManager = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SetGamerAccount: fn(
self: *const IXblIdpAuthManager,
msaAccountId: ?[*:0]const u16,
xuid: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetGamerAccount: fn(
self: *const IXblIdpAuthManager,
msaAccountId: ?*?PWSTR,
xuid: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetAppViewInitialized: fn(
self: *const IXblIdpAuthManager,
appSid: ?[*:0]const u16,
msaAccountId: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetEnvironment: fn(
self: *const IXblIdpAuthManager,
environment: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetSandbox: fn(
self: *const IXblIdpAuthManager,
sandbox: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetTokenAndSignatureWithTokenResult: fn(
self: *const IXblIdpAuthManager,
msaAccountId: ?[*:0]const u16,
appSid: ?[*:0]const u16,
msaTarget: ?[*:0]const u16,
msaPolicy: ?[*:0]const u16,
httpMethod: ?[*:0]const u16,
uri: ?[*:0]const u16,
headers: ?[*:0]const u16,
body: [*:0]u8,
bodySize: u32,
forceRefresh: BOOL,
result: ?*?*IXblIdpAuthTokenResult,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthManager_SetGamerAccount(self: *const T, msaAccountId: ?[*:0]const u16, xuid: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthManager.VTable, self.vtable).SetGamerAccount(@ptrCast(*const IXblIdpAuthManager, self), msaAccountId, xuid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthManager_GetGamerAccount(self: *const T, msaAccountId: ?*?PWSTR, xuid: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthManager.VTable, self.vtable).GetGamerAccount(@ptrCast(*const IXblIdpAuthManager, self), msaAccountId, xuid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthManager_SetAppViewInitialized(self: *const T, appSid: ?[*:0]const u16, msaAccountId: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthManager.VTable, self.vtable).SetAppViewInitialized(@ptrCast(*const IXblIdpAuthManager, self), appSid, msaAccountId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthManager_GetEnvironment(self: *const T, environment: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthManager.VTable, self.vtable).GetEnvironment(@ptrCast(*const IXblIdpAuthManager, self), environment);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthManager_GetSandbox(self: *const T, sandbox: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthManager.VTable, self.vtable).GetSandbox(@ptrCast(*const IXblIdpAuthManager, self), sandbox);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthManager_GetTokenAndSignatureWithTokenResult(self: *const T, msaAccountId: ?[*:0]const u16, appSid: ?[*:0]const u16, msaTarget: ?[*:0]const u16, msaPolicy: ?[*:0]const u16, httpMethod: ?[*:0]const u16, uri: ?[*:0]const u16, headers: ?[*:0]const u16, body: [*:0]u8, bodySize: u32, forceRefresh: BOOL, result: ?*?*IXblIdpAuthTokenResult) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthManager.VTable, self.vtable).GetTokenAndSignatureWithTokenResult(@ptrCast(*const IXblIdpAuthManager, self), msaAccountId, appSid, msaTarget, msaPolicy, httpMethod, uri, headers, body, bodySize, forceRefresh, result);
}
};}
pub usingnamespace MethodMixin(@This());
};
const IID_IXblIdpAuthTokenResult_Value = @import("zig.zig").Guid.initString("46ce0225-f267-4d68-b299-b2762552dec1");
pub const IID_IXblIdpAuthTokenResult = &IID_IXblIdpAuthTokenResult_Value;
pub const IXblIdpAuthTokenResult = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetStatus: fn(
self: *const IXblIdpAuthTokenResult,
status: ?*XBL_IDP_AUTH_TOKEN_STATUS,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetErrorCode: fn(
self: *const IXblIdpAuthTokenResult,
errorCode: ?*HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetToken: fn(
self: *const IXblIdpAuthTokenResult,
token: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetSignature: fn(
self: *const IXblIdpAuthTokenResult,
signature: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetSandbox: fn(
self: *const IXblIdpAuthTokenResult,
sandbox: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetEnvironment: fn(
self: *const IXblIdpAuthTokenResult,
environment: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMsaAccountId: fn(
self: *const IXblIdpAuthTokenResult,
msaAccountId: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetXuid: fn(
self: *const IXblIdpAuthTokenResult,
xuid: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetGamertag: fn(
self: *const IXblIdpAuthTokenResult,
gamertag: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetAgeGroup: fn(
self: *const IXblIdpAuthTokenResult,
ageGroup: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetPrivileges: fn(
self: *const IXblIdpAuthTokenResult,
privileges: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMsaTarget: fn(
self: *const IXblIdpAuthTokenResult,
msaTarget: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMsaPolicy: fn(
self: *const IXblIdpAuthTokenResult,
msaPolicy: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMsaAppId: fn(
self: *const IXblIdpAuthTokenResult,
msaAppId: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetRedirect: fn(
self: *const IXblIdpAuthTokenResult,
redirect: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMessage: fn(
self: *const IXblIdpAuthTokenResult,
message: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetHelpId: fn(
self: *const IXblIdpAuthTokenResult,
helpId: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetEnforcementBans: fn(
self: *const IXblIdpAuthTokenResult,
enforcementBans: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetRestrictions: fn(
self: *const IXblIdpAuthTokenResult,
restrictions: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetTitleRestrictions: fn(
self: *const IXblIdpAuthTokenResult,
titleRestrictions: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetStatus(self: *const T, status: ?*XBL_IDP_AUTH_TOKEN_STATUS) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetStatus(@ptrCast(*const IXblIdpAuthTokenResult, self), status);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetErrorCode(self: *const T, errorCode: ?*HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetErrorCode(@ptrCast(*const IXblIdpAuthTokenResult, self), errorCode);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetToken(self: *const T, token: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetToken(@ptrCast(*const IXblIdpAuthTokenResult, self), token);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetSignature(self: *const T, signature: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetSignature(@ptrCast(*const IXblIdpAuthTokenResult, self), signature);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetSandbox(self: *const T, sandbox: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetSandbox(@ptrCast(*const IXblIdpAuthTokenResult, self), sandbox);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetEnvironment(self: *const T, environment: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetEnvironment(@ptrCast(*const IXblIdpAuthTokenResult, self), environment);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetMsaAccountId(self: *const T, msaAccountId: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetMsaAccountId(@ptrCast(*const IXblIdpAuthTokenResult, self), msaAccountId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetXuid(self: *const T, xuid: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetXuid(@ptrCast(*const IXblIdpAuthTokenResult, self), xuid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetGamertag(self: *const T, gamertag: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetGamertag(@ptrCast(*const IXblIdpAuthTokenResult, self), gamertag);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetAgeGroup(self: *const T, ageGroup: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetAgeGroup(@ptrCast(*const IXblIdpAuthTokenResult, self), ageGroup);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetPrivileges(self: *const T, privileges: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetPrivileges(@ptrCast(*const IXblIdpAuthTokenResult, self), privileges);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetMsaTarget(self: *const T, msaTarget: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetMsaTarget(@ptrCast(*const IXblIdpAuthTokenResult, self), msaTarget);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetMsaPolicy(self: *const T, msaPolicy: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetMsaPolicy(@ptrCast(*const IXblIdpAuthTokenResult, self), msaPolicy);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetMsaAppId(self: *const T, msaAppId: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetMsaAppId(@ptrCast(*const IXblIdpAuthTokenResult, self), msaAppId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetRedirect(self: *const T, redirect: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetRedirect(@ptrCast(*const IXblIdpAuthTokenResult, self), redirect);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetMessage(self: *const T, message: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetMessage(@ptrCast(*const IXblIdpAuthTokenResult, self), message);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetHelpId(self: *const T, helpId: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetHelpId(@ptrCast(*const IXblIdpAuthTokenResult, self), helpId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetEnforcementBans(self: *const T, enforcementBans: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetEnforcementBans(@ptrCast(*const IXblIdpAuthTokenResult, self), enforcementBans);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetRestrictions(self: *const T, restrictions: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetRestrictions(@ptrCast(*const IXblIdpAuthTokenResult, self), restrictions);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult_GetTitleRestrictions(self: *const T, titleRestrictions: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult.VTable, self.vtable).GetTitleRestrictions(@ptrCast(*const IXblIdpAuthTokenResult, self), titleRestrictions);
}
};}
pub usingnamespace MethodMixin(@This());
};
const IID_IXblIdpAuthTokenResult2_Value = @import("zig.zig").Guid.initString("75d760b0-60b9-412d-994f-26b2cd5f7812");
pub const IID_IXblIdpAuthTokenResult2 = &IID_IXblIdpAuthTokenResult2_Value;
pub const IXblIdpAuthTokenResult2 = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetModernGamertag: fn(
self: *const IXblIdpAuthTokenResult2,
value: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetModernGamertagSuffix: fn(
self: *const IXblIdpAuthTokenResult2,
value: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetUniqueModernGamertag: fn(
self: *const IXblIdpAuthTokenResult2,
value: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult2_GetModernGamertag(self: *const T, value: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult2.VTable, self.vtable).GetModernGamertag(@ptrCast(*const IXblIdpAuthTokenResult2, self), value);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult2_GetModernGamertagSuffix(self: *const T, value: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult2.VTable, self.vtable).GetModernGamertagSuffix(@ptrCast(*const IXblIdpAuthTokenResult2, self), value);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IXblIdpAuthTokenResult2_GetUniqueModernGamertag(self: *const T, value: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IXblIdpAuthTokenResult2.VTable, self.vtable).GetUniqueModernGamertag(@ptrCast(*const IXblIdpAuthTokenResult2, self), value);
}
};}
pub usingnamespace MethodMixin(@This());
};
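// Example (sketch): a v2 pointer is obtained from a v1 pointer through the
// inherited IUnknown mixin. This assumes the usual zigwin32 QueryInterface
// shape from system/com.zig; `result` is an illustrative
// `*IXblIdpAuthTokenResult` obtained elsewhere.
//
//     var result2: ?*IXblIdpAuthTokenResult2 = null;
//     const hr = result.IUnknown_QueryInterface(
//         IID_IXblIdpAuthTokenResult2,
//         @ptrCast(?*?*anyopaque, &result2),
//     );
//     if (hr == 0) {
//         defer _ = result2.?.IUnknown_Release();
//         // ... result2.?.IXblIdpAuthTokenResult2_GetModernGamertag(...) ...
//     }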
//--------------------------------------------------------------------------------
// Section: Functions (30)
//--------------------------------------------------------------------------------
pub extern "api-ms-win-gaming-expandedresources-l1-1-0" fn HasExpandedResources(
hasExpandedResources: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-expandedresources-l1-1-0" fn GetExpandedResourceExclusiveCpuCount(
exclusiveCpuCount: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-expandedresources-l1-1-0" fn ReleaseExclusiveCpuSets(
) callconv(@import("std").os.windows.WINAPI) HRESULT;
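// Example (sketch): querying expanded (game-mode) resources with the externs
// above. Treating 0 as S_OK; variable names are illustrative.
//
//     var has_expanded: BOOL = 0;
//     if (HasExpandedResources(&has_expanded) == 0 and has_expanded != 0) {
//         var cpu_count: u32 = 0;
//         _ = GetExpandedResourceExclusiveCpuCount(&cpu_count);
//         // ... schedule work across cpu_count exclusive CPUs,
//         //     calling ReleaseExclusiveCpuSets() when done ...
//     }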
pub extern "api-ms-win-gaming-deviceinformation-l1-1-0" fn GetGamingDeviceModelInformation(
information: ?*GAMING_DEVICE_MODEL_INFORMATION,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
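// Example (sketch): distinguishing desktop from Xbox hardware, assuming the
// GAMING_DEVICE_MODEL_INFORMATION struct defined earlier in this module.
//
//     var info: GAMING_DEVICE_MODEL_INFORMATION = undefined;
//     if (GetGamingDeviceModelInformation(&info) == 0) {
//         // info identifies the vendor and device model of the hardware.
//     }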
pub extern "api-ms-win-gaming-tcui-l1-1-0" fn ShowGameInviteUI(
serviceConfigurationId: ?HSTRING,
sessionTemplateName: ?HSTRING,
sessionId: ?HSTRING,
invitationDisplayText: ?HSTRING,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-0" fn ShowPlayerPickerUI(
promptDisplayText: ?HSTRING,
xuids: [*]const ?HSTRING,
xuidsCount: usize,
preSelectedXuids: ?[*]const ?HSTRING,
preSelectedXuidsCount: usize,
minSelectionCount: usize,
maxSelectionCount: usize,
completionRoutine: ?PlayerPickerUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-0" fn ShowProfileCardUI(
targetUserXuid: ?HSTRING,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-0" fn ShowChangeFriendRelationshipUI(
targetUserXuid: ?HSTRING,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-0" fn ShowTitleAchievementsUI(
titleId: u32,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-0" fn ProcessPendingGameUI(
waitForCompletion: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-0" fn TryCancelPendingGameUI(
) callconv(@import("std").os.windows.WINAPI) BOOL;
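// Example (sketch): the TCUI calls above are asynchronous; results arrive via
// the completionRoutine, and ProcessPendingGameUI pumps pending completions.
// `onUiDone` and `title_id` are illustrative; the callback must match the
// GameUICompletionRoutine type defined earlier in this module.
//
//     fn onUiDone(returnCode: HRESULT, context: ?*anyopaque)
//         callconv(@import("std").os.windows.WINAPI) void
//     {
//         _ = returnCode; _ = context;
//     }
//     _ = ShowTitleAchievementsUI(title_id, onUiDone, null);
//     _ = ProcessPendingGameUI(1); // wait for the pending UI to complete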
pub extern "api-ms-win-gaming-tcui-l1-1-1" fn CheckGamingPrivilegeWithUI(
privilegeId: u32,
scope: ?HSTRING,
policy: ?HSTRING,
friendlyMessage: ?HSTRING,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-1" fn CheckGamingPrivilegeSilently(
privilegeId: u32,
scope: ?HSTRING,
policy: ?HSTRING,
hasPrivilege: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
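// Example (sketch): the silent check is synchronous and reports through an out
// BOOL, unlike the UI variant above. The privilege id is illustrative.
//
//     var has_privilege: BOOL = 0;
//     const hr = CheckGamingPrivilegeSilently(
//         some_privilege_id, null, null, &has_privilege);
//     if (hr == 0 and has_privilege != 0) {
//         // privilege granted without showing any UI
//     }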
pub extern "api-ms-win-gaming-tcui-l1-1-2" fn ShowGameInviteUIForUser(
user: ?*IInspectable,
serviceConfigurationId: ?HSTRING,
sessionTemplateName: ?HSTRING,
sessionId: ?HSTRING,
invitationDisplayText: ?HSTRING,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-2" fn ShowPlayerPickerUIForUser(
user: ?*IInspectable,
promptDisplayText: ?HSTRING,
xuids: [*]const ?HSTRING,
xuidsCount: usize,
preSelectedXuids: ?[*]const ?HSTRING,
preSelectedXuidsCount: usize,
minSelectionCount: usize,
maxSelectionCount: usize,
completionRoutine: ?PlayerPickerUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-2" fn ShowProfileCardUIForUser(
user: ?*IInspectable,
targetUserXuid: ?HSTRING,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-2" fn ShowChangeFriendRelationshipUIForUser(
user: ?*IInspectable,
targetUserXuid: ?HSTRING,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-2" fn ShowTitleAchievementsUIForUser(
user: ?*IInspectable,
titleId: u32,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-2" fn CheckGamingPrivilegeWithUIForUser(
user: ?*IInspectable,
privilegeId: u32,
scope: ?HSTRING,
policy: ?HSTRING,
friendlyMessage: ?HSTRING,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-2" fn CheckGamingPrivilegeSilentlyForUser(
user: ?*IInspectable,
privilegeId: u32,
scope: ?HSTRING,
policy: ?HSTRING,
hasPrivilege: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
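// NOTE: the *ForUser variants above mirror the earlier single-user functions
// but scope the call to an explicit user, passed as a WinRT IInspectable
// (typically a Windows.System.User instance).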
pub extern "api-ms-win-gaming-tcui-l1-1-3" fn ShowGameInviteUIWithContext(
serviceConfigurationId: ?HSTRING,
sessionTemplateName: ?HSTRING,
sessionId: ?HSTRING,
invitationDisplayText: ?HSTRING,
customActivationContext: ?HSTRING,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-3" fn ShowGameInviteUIWithContextForUser(
user: ?*IInspectable,
serviceConfigurationId: ?HSTRING,
sessionTemplateName: ?HSTRING,
sessionId: ?HSTRING,
invitationDisplayText: ?HSTRING,
customActivationContext: ?HSTRING,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-4" fn ShowGameInfoUI(
titleId: u32,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-4" fn ShowGameInfoUIForUser(
user: ?*IInspectable,
titleId: u32,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-4" fn ShowFindFriendsUI(
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-4" fn ShowFindFriendsUIForUser(
user: ?*IInspectable,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-4" fn ShowCustomizeUserProfileUI(
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-4" fn ShowCustomizeUserProfileUIForUser(
user: ?*IInspectable,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-4" fn ShowUserSettingsUI(
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "api-ms-win-gaming-tcui-l1-1-4" fn ShowUserSettingsUIForUser(
user: ?*IInspectable,
completionRoutine: ?GameUICompletionRoutine,
context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
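// This module exports no ANSI/wide (A/W) function pairs, so every unicode_mode
// branch below is empty.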
pub usingnamespace switch (@import("zig.zig").unicode_mode) {
.ansi => struct {
},
.wide => struct {
},
.unspecified => if (@import("builtin").is_test) struct {
} else struct {
},
};
//--------------------------------------------------------------------------------
// Section: Imports (8)
//--------------------------------------------------------------------------------
const Guid = @import("zig.zig").Guid;
const BOOL = @import("foundation.zig").BOOL;
const BSTR = @import("foundation.zig").BSTR;
const HRESULT = @import("foundation.zig").HRESULT;
const HSTRING = @import("system/win_rt.zig").HSTRING;
const IInspectable = @import("system/win_rt.zig").IInspectable;
const IUnknown = @import("system/com.zig").IUnknown;
const PWSTR = @import("foundation.zig").PWSTR;
test {
// The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476
if (@hasDecl(@This(), "GameUICompletionRoutine")) { _ = GameUICompletionRoutine; }
if (@hasDecl(@This(), "PlayerPickerUICompletionRoutine")) { _ = PlayerPickerUICompletionRoutine; }
@setEvalBranchQuota(
@import("std").meta.declarations(@This()).len * 3
);
// reference all the pub declarations
if (!@import("builtin").is_test) return;
inline for (@import("std").meta.declarations(@This())) |decl| {
if (decl.is_pub) {
_ = decl;
}
}
}