licenses (sequence, lengths 1-3) | version (677 string classes) | tree_hash (40 chars) | path (1 class) | type (2 classes) | size (2-8 chars) | text (25-67.1M chars) | package_name (2-41 chars) | repo (33-86 chars) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 1.1.0 | 4aaf8a88550d628b8142ed6772901fdf9cef55fd | code | 1396 | if (Sys.islinux())
@testset "c api memory leak test" begin
function get_memory_usage()
open("/proc/$(getpid())/statm") do io
return split(read(io, String))[1]
end
end
file = joinpath(videodir, "annie_oakley.ogg")
@testset "open file test" begin
check_size = 10
usage_vec = Vector{String}(undef, check_size)
for i in 1:check_size
f = VideoIO.openvideo(file)
close(f)
GC.gc()
usage_vec[i] = get_memory_usage()
end
@debug "open file test" usage_vec
@test usage_vec[end-1] == usage_vec[end]
if usage_vec[end-1] != usage_vec[end]
@error "open file test" usage_vec
end
end
@testset "open and read file test" begin
check_size = 10
usage_vec = Vector{String}(undef, check_size)
for i in 1:check_size
f = VideoIO.openvideo(file)
img = read(f)
close(f)
GC.gc()
usage_vec[i] = get_memory_usage()
end
@test usage_vec[end-1] == usage_vec[end]
if usage_vec[end-1] != usage_vec[end]
@error "open and read file test" usage_vec
end
end
end
end
| VideoIO | https://github.com/JuliaIO/VideoIO.jl.git |
|
[
"MIT"
] | 1.1.0 | 4aaf8a88550d628b8142ed6772901fdf9cef55fd | code | 12740 | @testset "Reading of various example file formats" begin
swscale_options = (sws_flags="accurate_rnd+full_chroma_inp+full_chroma_int",)
for testvid in values(VideoIO.TestVideos.videofiles)
name = testvid.name
test_frameno = testvid.testframe
@testset "Reading $(testvid.name)" begin
testvid_path = joinpath(VideoIO.TestVideos.videodir, name)
comparison_frame = make_comparison_frame_png(load, testvid_path, test_frameno)
f = VideoIO.testvideo(testvid_path)
v = VideoIO.openvideo(f; swscale_options=swscale_options)
try
time_seconds = VideoIO.gettime(v)
@test time_seconds == 0
width, height = VideoIO.out_frame_size(v)
@test VideoIO.width(v) == width
@test VideoIO.height(v) == height
@test VideoIO.out_frame_eltype(v) == RGB{N0f8}
if size(comparison_frame, 1) > height
trimmed_comparison_frame = comparison_frame[1+size(comparison_frame, 1)-height:end, :]
else
trimmed_comparison_frame = comparison_frame
end
# Find the first non-trivial image
first_img = read(v)
first_time = VideoIO.gettime(v)
seekstart(v)
img = read(v)
@test VideoIO.gettime(v) == first_time
@test img == first_img
@test size(img) == VideoIO.out_frame_size(v)[[2, 1]]
# First read(v) then framerate(v)
# https://github.com/JuliaIO/VideoIO.jl/issues/349
if !isnothing(testvid.fps)
@test isapprox(VideoIO.framerate(v), testvid.fps, rtol=0.01)
else
@test VideoIO.framerate(v) != 0
end
# no scaling currently
@test VideoIO.out_frame_size(v) == VideoIO.raw_frame_size(v)
@test VideoIO.raw_pixel_format(v) == 0 # true for current test videos
i = 1
while i < test_frameno
read!(v, img)
i += 1
end
test_compare_frames(img, trimmed_comparison_frame, required_accuracy)
test_time = VideoIO.gettime(v)
seek(v, test_time)
raw_img = parent(img)
read!(v, raw_img) # VideoReader should accept scanline-major images
test_compare_frames(img, trimmed_comparison_frame, required_accuracy)
@test VideoIO.gettime(v) == test_time
if size(img, 1) != size(img, 2)
                # Passing an array that is not scanline-major does not work
@test_throws ArgumentError read!(v, similar(img))
@test VideoIO.gettime(v) == test_time
end
@test_throws(ArgumentError, read!(v, similar(raw_img, size(raw_img) .- 1)))
@test_throws MethodError read!(v, similar(raw_img, Rational{Int}))
@test_throws ArgumentError read!(v, similar(raw_img, Gray{N0f8}))
@test VideoIO.gettime(v) == test_time
seekstart(v)
for i in 1:50
read!(v, img)
end
fiftieth_frame = copy(img)
fiftytime = VideoIO.gettime(v)
while !eof(v)
read!(v, img)
end
seek(v, fiftytime)
read!(v, img)
@test img == fiftieth_frame
seekstart(v)
start_t = VideoIO.gettime(v)
@test start_t <= 0
buff, align = VideoIO.read_raw(v, 1)
@test VideoIO.out_bytes_size(v) == length(buff)
@test align == 1
buff_bak = copy(buff)
seekstart(v)
VideoIO.read_raw!(v, buff, 1)
last_time = VideoIO.gettime(v)
@test buff == buff_bak
@test_throws(ArgumentError, VideoIO.read_raw!(v, similar(buff, size(buff) .- 1)))
@test_throws MethodError VideoIO.read_raw!(v, similar(buff, Int))
@test VideoIO.gettime(v) == last_time
notranscode_buff = VideoIO.openvideo(read, testvid_path, transcode=false)
@test notranscode_buff == buff_bak
# read first frames again, and compare
read_frameno!(img, v, test_frameno)
test_compare_frames(img, trimmed_comparison_frame, required_accuracy)
# make sure read! works with both PermutedDimsArray and Array
# The above tests already use read! for PermutedDimsArray,
# so just test the type of img
@test typeof(img) <: PermutedDimsArray
img_p = parent(img)
@assert typeof(img_p) <: Array
# img is a view of img_p, so calling read! on img_p should alter img
#
# first, zero img out to be sure we get the desired result from
# calls to read on img_p!
fill!(img, zero(eltype(img)))
# Then get the first frame, which uses read!
read_frameno!(img_p, v, test_frameno)
# Finally compare the result to make sure it's right
test_compare_frames(img, trimmed_comparison_frame, required_accuracy)
# Skipping & frame counting
VideoIO.seekstart(v)
VideoIO.skipframe(v)
VideoIO.skipframes(v, 10)
@test VideoIO.counttotalframes(v) == VideoIO.TestVideos.videofiles[name].numframes
finally
close(f)
end
if occursin("annie_oakley", name)
framestack = VideoIO.load(testvid_path)
@test length(framestack) == VideoIO.TestVideos.videofiles[name].numframes
# TODO: Replace this with a content check as summarysize is not stable across julia versions
if VERSION < v"1.6.3" || VERSION > v"1.11.0-0"
@test_broken Base.summarysize(framestack) == VideoIO.TestVideos.videofiles[name].summarysize
else
@test Base.summarysize(framestack) == VideoIO.TestVideos.videofiles[name].summarysize
end
f = File{DataFormat{:OGG}}(testvid_path)
framestack = VideoIO.fileio_load(f)
@test length(framestack) == VideoIO.TestVideos.videofiles[name].numframes
# TODO: Replace this with a content check as summarysize is not stable across julia versions
if VERSION < v"1.6.3" || VERSION > v"1.11.0-0"
@test_broken Base.summarysize(framestack) == VideoIO.TestVideos.videofiles[name].summarysize
else
@test Base.summarysize(framestack) == VideoIO.TestVideos.videofiles[name].summarysize
end
path, io = mktemp()
f = File{DataFormat{:MP4}}(path * ".mp4")
VideoIO.fileio_save(f, framestack)
@test isfile(path * ".mp4")
@test stat(path * ".mp4").size > 0
framestack = nothing
GC.gc()
end
end
end
end
@memory_profile
@testset "Reading monochrome videos" begin
testvid_path = joinpath(VideoIO.TestVideos.videodir, "annie_oakley.ogg")
# Test that limited range YCbCr values are translated to "full range"
minp, maxp = VideoIO.openvideo(get_video_extrema, testvid_path, target_format=VideoIO.AV_PIX_FMT_GRAY8)
@test typeof(minp) == Gray{N0f8}
@test minp.val.i < 16
@test maxp.val.i > 235
# Disable automatic rescaling
minp, maxp = VideoIO.openvideo(
get_video_extrema,
testvid_path,
target_format=VideoIO.AV_PIX_FMT_GRAY8,
target_colorspace_details=VideoIO.VioColorspaceDetails(),
)
@test minp.val.i >= 16
@test maxp.val.i <= 235
GC.gc()
end
@memory_profile
@testset "Reading RGB video as monochrome" begin
@testset "Iterative" begin
io = VideoIO.testvideo("ladybird")
VideoIO.openvideo(io, target_format=VideoIO.AV_PIX_FMT_GRAY8) do f
img = read(f)
for i in 1:10
read!(f, img)
end
@test eltype(img) == Gray{N0f8}
end
end
@testset "Full load" begin
testvid_path = joinpath(VideoIO.TestVideos.videodir, "ladybird.mp4")
vid = VideoIO.load(testvid_path, target_format=VideoIO.AV_PIX_FMT_GRAY8)
@test eltype(first(vid)) == Gray{N0f8}
end
GC.gc()
end
@memory_profile
@testset "IO reading of various example file formats" begin
swscale_options = (sws_flags="accurate_rnd+full_chroma_inp+full_chroma_int",)
for testvid in values(VideoIO.TestVideos.videofiles)
name = testvid.name
test_frameno = testvid.testframe
# TODO: fix me?
(startswith(name, "ladybird") || startswith(name, "NPS")) && continue
@testset "Testing $name" begin
testvid_path = joinpath(VideoIO.TestVideos.videodir, name)
comparison_frame = make_comparison_frame_png(load, testvid_path, test_frameno)
filename = joinpath(videodir, name)
VideoIO.openvideo(filename; swscale_options=swscale_options) do v
width, height = VideoIO.out_frame_size(v)
if size(comparison_frame, 1) > height
trimmed_comparison_frame = comparison_frame[1+size(comparison_frame, 1)-height:end, :]
else
trimmed_comparison_frame = comparison_frame
end
img = read(v)
i = 1
while i < test_frameno
read!(v, img)
i += 1
end
test_compare_frames(img, trimmed_comparison_frame, required_accuracy)
while !eof(v)
read!(v, img)
end
# Iterator interface
VT = typeof(v)
@test Base.IteratorSize(VT) === Base.SizeUnknown()
@test Base.IteratorEltype(VT) === Base.EltypeUnknown()
VideoIO.seekstart(v)
i = 0
local first_frame
local last_frame
for frame in v
i += 1
if i == 1
first_frame = frame
end
last_frame = frame
end
@test i == VideoIO.TestVideos.videofiles[name].numframes
# test that the frames returned by the iterator have distinct storage
if i > 1
@test first_frame !== last_frame
end
## Test that iterator is mutable, and continues where iteration last
## stopped.
@test iterate(v) === nothing
end
GC.gc()
end
end
VideoIO.testvideo("ladybird") # coverage testing
@test_throws ErrorException VideoIO.testvideo("rickroll")
@test_throws ErrorException VideoIO.testvideo("")
GC.gc()
end
@memory_profile
@testset "Reading video metadata" begin
@testset "Reading Storage Aspect Ratio: SAR" begin
    # currently, the SAR of all the test videos is 1; we should add another video with a valid SAR that is not equal to 1
vids = Dict("ladybird.mp4" => 1, "black_hole.webm" => 1, "crescent-moon.ogv" => 1, "annie_oakley.ogg" => 1)
@test all(VideoIO.aspect_ratio(VideoIO.openvideo(joinpath(videodir, k))) == v for (k, v) in vids)
end
@testset "Reading video duration, start date, and duration" begin
# tesing the duration and date & time functions:
file = joinpath(videodir, "annie_oakley.ogg")
@test VideoIO.get_duration(file) == 24224200 / 1e6
@test VideoIO.get_start_time(file) == DateTime(1970, 1, 1)
@test VideoIO.get_time_duration(file) == (DateTime(1970, 1, 1), 24224200 / 1e6)
@test VideoIO.get_number_frames(file) === nothing
end
@testset "Reading the number of frames from container" begin
file = joinpath(videodir, "ladybird.mp4")
@test VideoIO.get_number_frames(file) == 398
@test VideoIO.get_number_frames(file, 0) == 398
@test_throws ArgumentError VideoIO.get_number_frames(file, -1)
@test_throws ErrorException VideoIO.get_number_frames("Not_a_file")
end
end
@memory_profile
| VideoIO | https://github.com/JuliaIO/VideoIO.jl.git |
|
[
"MIT"
] | 1.1.0 | 4aaf8a88550d628b8142ed6772901fdf9cef55fd | code | 1038 | using Test
using ColorTypes: RGB, Gray, N0f8, red, green, blue
using ColorVectorSpace: ColorVectorSpace
using FileIO, ImageCore, Dates, Statistics, StatsBase
using Profile
using FFMPEG: FFMPEG
using VideoIO: VideoIO
const testdir = dirname(@__FILE__)
const videodir = VideoIO.TestVideos.videodir
const tempvidname = "testvideo.mp4"
const tempvidpath = joinpath(tempdir(), tempvidname)
const required_accuracy = 0.07
# VideoIO.TestVideos.available()
VideoIO.TestVideos.download_all()
include("utils.jl") # Testing utility functions
memory_profiling = get(ENV, "VIDEOIO_MEMPROFILE", "false") === "true" && Base.thisminor(Base.VERSION) >= v"1.9"
start_time = time()
@memory_profile
@testset "VideoIO" verbose = true begin
include("avptr.jl")
@memory_profile
include("reading.jl")
@memory_profile
include("writing.jl")
@memory_profile
include("accuracy.jl")
@memory_profile
GC.gc()
rm(tempvidpath, force = true)
include("bugs.jl")
@memory_profile
end
#VideoIO.TestVideos.remove_all()
| VideoIO | https://github.com/JuliaIO/VideoIO.jl.git |
|
[
"MIT"
] | 1.1.0 | 4aaf8a88550d628b8142ed6772901fdf9cef55fd | code | 5184 |
swapext(f, new_ext) = "$(splitext(f)[1])$new_ext"
isarm() = Base.Sys.ARCH in (:arm, :arm32, :arm7l, :armv7l, :arm8l, :armv8l, :aarch64, :arm64)
@noinline function isblank(img)
return all(c -> green(c) == 0, img) ||
all(c -> blue(c) == 0, img) ||
all(c -> red(c) == 0, img) ||
maximum(rawview(channelview(img))) < 0xcf
end
function compare_colors(a::RGB, b::RGB, tol)
ok = true
for f in (red, green, blue)
dev = abs(float(f(a)) - float(f(b)))
ok &= dev <= tol
end
return ok
end
# Helper functions
function test_compare_frames(test_frame, ref_frame, tol = 0.05)
if isarm()
@test_skip test_frame == ref_frame
else
frames_similar = true
for (a, b) in zip(test_frame, ref_frame)
frames_similar &= compare_colors(a, b, tol)
end
@test frames_similar
end
end
# uses read!
function read_frameno!(img, v, frameno)
seekstart(v)
i = 0
while !eof(v) && i < frameno
read!(v, img)
i += 1
end
end
function make_comparison_frame_png(vidpath::AbstractString, frameno::Integer, writedir = tempdir())
vid_basename = first(splitext(basename(vidpath)))
png_name = joinpath(writedir, "$(vid_basename)_$(frameno).png")
FFMPEG.exe(
`-y -v error -i $(vidpath) -vf "sws_flags=accurate_rnd+full_chroma_inp+full_chroma_int; select=eq(n\,$(frameno-1))" -vframes 1 $(png_name)`,
)
return png_name
end
function make_comparison_frame_png(f, args...)
png_name = make_comparison_frame_png(args...)
try
f(png_name)
finally
rm(png_name, force = true)
end
end
function get_video_extrema(v)
img = read(v)
raw_img = parent(img)
# Test that the limited range of this video is converted to full range
minp, maxp = extrema(raw_img)
while !eof(v)
read!(v, raw_img)
this_minp, this_maxp = extrema(raw_img)
minp = min(minp, this_minp)
maxp = max(maxp, this_maxp)
end
return minp, maxp
end
function get_raw_luma_extrema(elt, vidpath, nw, nh)
buff, align = VideoIO.openvideo(vidpath) do v
return VideoIO.read_raw(v, 1)
end
luma_buff = view(buff, 1:nw*nh*sizeof(elt))
luma_vals = reinterpret(elt, luma_buff)
return reinterpret.(extrema(luma_vals))
end
using ColorTypes: RGB, HSV
using FixedPointNumbers: Normed, N6f10
using Base: ReinterpretArray
function test_tone!(a::AbstractMatrix{X}, offset = 0, minval = 0, maxval = reinterpret(one(X))) where {T,X<:Normed{T}}
maxcodept = reinterpret(one(X))
modsize = maxval - minval + 1
@inbounds for i in eachindex(a)
a[i] = reinterpret(X, T(minval + mod(i + offset - 1, modsize)))
end
return a
end
function test_tone!(
  a::AbstractMatrix{C},
  offset = 0,
  minval = 0,
  maxval = reinterpret(one(X)),
) where {T,X<:Normed{T},C<:RGB{X}}
  maxcodept = reinterpret(one(X)) # was undefined in this method; largest raw code point
  modsize = maxval - minval + 1
  @inbounds for i in eachindex(a)
    h = mod(i, 360)
    v = minval + mod(i + offset - 1, modsize) / maxcodept
    hsv = HSV(h, 1, v)
    a[i] = convert(RGB{X}, hsv)
  end
  return a
end
test_tone(::Type{T}, nx::Integer, ny, args...) where {T} = test_tone!(Matrix{T}(undef, nx, ny), args...)
test_tone(nx::Integer, ny, args...) = test_tone(N6f10, nx, ny, args...)
function make_test_tones(::Type{T}, nx, ny, nf, args...) where {T}
imgs = Vector{Matrix{T}}(undef, nf)
@inbounds for i in 1:nf
imgs[i] = test_tone(T, nx, ny, i - 1, args...)
end
return imgs
end
sizeof_parent(buf::Array) = sizeof(buf)
sizeof_parent(buf::ReinterpretArray) = sizeof_parent(parent(buf))
function copy_imgbuf_to_buf!(
buf::StridedArray{UInt8},
bwidth::Integer,
fheight::Integer,
imgbufp::Ptr{UInt8},
linesize::Integer,
)
sizeof_parent(buf) < bwidth * fheight && throw(ArgumentError("buf is not large enough"))
for lineno in 1:fheight
offno = lineno - 1
bufp = pointer(buf, bwidth * offno + 1)
imgp = imgbufp + linesize * offno
GC.@preserve buf unsafe_copyto!(bufp, imgp, bwidth)
end
end
function copy_imgbuf_to_buf!(
buf::StridedArray{UInt8},
bwidth::Integer,
fheight::Integer,
imgbuf::StridedArray{UInt8},
align::Integer,
)
linesize = align * cld(bwidth, align)
imgbufp = pointer(imgbuf)
GC.@preserve imgbuf copy_imgbuf_to_buf!(buf, bwidth, fheight, imgbufp, linesize)
end
function copy_imgbuf_to_buf!(buf::StridedArray, fwidth::Integer, fheight::Integer, nbytesperpixel::Integer, args...)
bwidth = nbytesperpixel * fwidth
return copy_imgbuf_to_buf!(reinterpret(UInt8, buf), bwidth, fheight, args...)
end
macro memory_profile()
if memory_profiling
_line = __source__.line
_file = string(__source__.file)
_mod = __module__
quote
local snap_fpath = Profile.take_heap_snapshot()
local free_mem = Base.format_bytes(Sys.free_memory())
local total_mem = Base.format_bytes(Sys.total_memory())
@warn "Memory profile @ $(time() - start_time)s" free_mem total_mem snap_fpath _module=$_mod _line=$_line _file=$(repr(_file))
end
end
end
| VideoIO | https://github.com/JuliaIO/VideoIO.jl.git |
|
[
"MIT"
] | 1.1.0 | 4aaf8a88550d628b8142ed6772901fdf9cef55fd | code | 8628 | @testset "Encoding video across all supported colortypes" begin
for el in [UInt8, RGB{N0f8}]
@testset "Encoding $el imagestack" begin
n = 100
imgstack = map(x -> rand(el, 100, 100), 1:n)
encoder_options = (color_range = 2, crf = 0, preset = "medium")
VideoIO.save(tempvidpath, imgstack, framerate = 30, encoder_options = encoder_options)
@test stat(tempvidpath).size > 100
@test VideoIO.openvideo(VideoIO.counttotalframes, tempvidpath) == n
end
end
end
@testset "Simultaneous encoding and muxing" begin
n = 100
encoder_options = (color_range = 2,)
container_private_options = (movflags = "+write_colr",)
for el in [Gray{N0f8}, Gray{N6f10}, RGB{N0f8}, RGB{N6f10}]
codec_name = el <: RGB ? "libx264rgb" : "libx264" # the former is necessary for lossless RGB
for scanline_arg in [true, false]
for sz in [100, 128] # 100 tests where julia<>ffmpeg imgbuf size doesn't match, 128 when it does
@testset "Encoding $el imagestack, scanline_major = $scanline_arg, size = $sz" begin
img_stack = map(x -> rand(el, sz, sz), 1:n)
encoder_private_options = (crf = 0, preset = "medium")
VideoIO.save(
tempvidpath,
img_stack;
codec_name = codec_name,
encoder_private_options = encoder_private_options,
encoder_options = encoder_options,
container_private_options = container_private_options,
scanline_major = scanline_arg,
)
@test stat(tempvidpath).size > 100
f = VideoIO.openvideo(tempvidpath, target_format = VideoIO.get_transfer_pix_fmt(el))
try
notempty = !eof(f)
@test notempty
if notempty
img = read(f)
test_img = scanline_arg ? parent(img) : img
i = 1
if el in [Gray{N0f8}, RGB{N0f8}]
@test test_img == img_stack[i]
else
@test_broken test_img == img_stack[i]
end
while !eof(f) && i < n
read!(f, img)
i += 1
if el in [Gray{N0f8}, RGB{N0f8}]
@test test_img == img_stack[i]
else
@test_broken test_img == img_stack[i]
end
end
@test i == n
end
finally
close(f)
end
end
end
end
end
end
@testset "Monochrome rescaling" begin
nw = nh = 100
s = VideoIO.GrayTransform()
s.srcframe.color_range = VideoIO.AVCOL_RANGE_JPEG
s.dstframe.color_range = VideoIO.AVCOL_RANGE_MPEG
s.srcframe.format = VideoIO.AV_PIX_FMT_GRAY8
s.dstframe.format = VideoIO.AV_PIX_FMT_GRAY8
s.src_depth = s.dst_depth = 8
f, src_t, dst_t = VideoIO.make_scale_function(s)
@test f(0x00) == 16
@test f(0xff) == 235
s.srcframe.format = s.dstframe.format = VideoIO.AV_PIX_FMT_GRAY10LE
s.src_depth = s.dst_depth = 10
f, src_t, dst_t = VideoIO.make_scale_function(s)
@test f(0x0000) == 64
@test f(UInt16(1023)) == 940
# Test that range conversion is working properly
img_full_range = reinterpret(UInt16, test_tone(N6f10, nw, nh))
writer = VideoIO.open_video_out(
tempvidpath,
img_full_range;
target_pix_fmt = VideoIO.AV_PIX_FMT_GRAY10LE,
scanline_major = true,
)
@test VideoIO.get_codec_name(writer) != "None"
try
VideoIO.write(writer, img_full_range)
bwidth = nw * 2
buff = Vector{UInt8}(undef, bwidth * nh)
# Input frame should be full range
copy_imgbuf_to_buf!(
buff,
bwidth,
nh,
writer.frame_graph.srcframe.data[1],
writer.frame_graph.srcframe.linesize[1],
)
raw_vals = reinterpret(UInt16, buff)
@test extrema(raw_vals) == (0x0000, 0x03ff)
# Output frame should be limited range
copy_imgbuf_to_buf!(
buff,
bwidth,
nh,
writer.frame_graph.dstframe.data[1],
writer.frame_graph.dstframe.linesize[1],
)
@test extrema(raw_vals) == (0x0040, 0x03ac)
finally
VideoIO.close_video_out!(writer)
end
@test_throws ErrorException VideoIO.write(writer, img_full_range)
end
@testset "Encoding monochrome videos" begin
encoder_private_options = (crf = 0, preset = "fast")
nw = nh = 100
nf = 5
for elt in (N0f8, N6f10)
if elt == N0f8
limited_min = 16
limited_max = 235
full_min = 0
full_max = 255
target_fmt = VideoIO.AV_PIX_FMT_GRAY8
else
limited_min = 64
limited_max = 940
full_min = 0
full_max = 1023
target_fmt = VideoIO.AV_PIX_FMT_GRAY10LE
end
img_stack_full_range = make_test_tones(elt, nw, nh, nf)
# Test that full-range input is automatically converted to limited range
VideoIO.save(
tempvidpath,
img_stack_full_range,
target_pix_fmt = target_fmt,
encoder_private_options = encoder_private_options,
)
minp, maxp = get_raw_luma_extrema(elt, tempvidpath, nw, nh)
@test minp > full_min
@test maxp < full_max
# Test that this conversion is NOT done if output video is full range
VideoIO.save(
tempvidpath,
img_stack_full_range,
target_pix_fmt = target_fmt,
encoder_private_options = encoder_private_options,
encoder_options = (color_range = 2,),
)
minp, maxp = get_raw_luma_extrema(elt, tempvidpath, nw, nh)
@test minp == full_min
@test maxp == full_max
# Test that you can override this automatic conversion when writing videos
img_stack_limited_range = make_test_tones(elt, nw, nh, nf, limited_min, limited_max)
VideoIO.save(
tempvidpath,
img_stack_limited_range,
target_pix_fmt = target_fmt,
encoder_private_options = encoder_private_options,
input_colorspace_details = VideoIO.VioColorspaceDetails(),
)
minp, maxp = get_raw_luma_extrema(elt, tempvidpath, nw, nh)
@test minp > full_min # Actual N6f10 values are messed up during encoding
@test maxp < full_max # Actual N6f10 values are messed up during encoding
end
end
@testset "Encoding video with rational frame rates" begin
n = 100
fr = 59 // 2 # 29.5
target_dur = 3.39
@testset "Encoding with frame rate $(float(fr))" begin
imgstack = map(x -> rand(UInt8, 100, 100), 1:n)
encoder_options = (color_range = 2, crf = 0, preset = "medium")
VideoIO.save(tempvidpath, imgstack, framerate = fr, encoder_options = encoder_options)
@test stat(tempvidpath).size > 100
measured_dur_str = VideoIO.FFMPEG.exe(
`-v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 $(tempvidpath)`,
command = VideoIO.FFMPEG.ffprobe,
collect = true,
)
@test parse(Float64, measured_dur_str[1]) == target_dur
end
end
@testset "Encoding video with float frame rates" begin
n = 100
fr = 29.5 # 59 // 2
target_dur = 3.39
@testset "Encoding with frame rate $(float(fr))" begin
imgstack = map(x -> rand(UInt8, 100, 100), 1:n)
encoder_options = (color_range = 2, crf = 0, preset = "medium")
VideoIO.save(tempvidpath, imgstack, framerate = fr, encoder_options = encoder_options)
@test stat(tempvidpath).size > 100
measured_dur_str = VideoIO.FFMPEG.exe(
`-v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 $(tempvidpath)`,
command = VideoIO.FFMPEG.ffprobe,
collect = true,
)
@test parse(Float64, measured_dur_str[1]) == target_dur
end
end
| VideoIO | https://github.com/JuliaIO/VideoIO.jl.git |
|
[
"MIT"
] | 1.1.0 | 4aaf8a88550d628b8142ed6772901fdf9cef55fd | code | 8049 | # A tool for testing lossless video encoding
using VideoIO, ColorTypes, FixedPointNumbers, DataFrames
function collectexecoutput(exec::Cmd)
out = Pipe()
err = Pipe()
p = Base.open(pipeline(ignorestatus(exec), stdout = out, stderr = err))
close(out.in)
close(err.in)
err_s = readlines(err)
out_s = readlines(out)
return (length(out_s) > length(err_s)) ? out_s : err_s
end
function createtestvideo(;
filename::String = "$(tempname()).mp4",
duration::Real = 5,
width::Int64 = 1280,
height::Int64 = 720,
framerate::Real = 30,
testtype::String = "testsrc2",
encoder::String = "libx264rgb",
)
withenv(VideoIO.execenv) do
return collectexecoutput(`$(VideoIO.ffmpeg) -y -f lavfi -i
$testtype=duration=$duration:size=$(width)x$(height):rate=$framerate
-c:v $encoder -preset slow -crf 0 -c:a copy $filename`)
end
return filename
end
function testvideocomp!(df, preset, imgstack_gray)
t = @elapsed VideoIO.save(
"video.mp4",
imgstack_gray,
framerate = 30,
codec_name = "libx264",
    encoder_options = (color_range = 2, crf = 0, preset = preset),
)
fs = filesize("video.mp4")
f = openvideo("video.mp4", target_format = VideoIO.AV_PIX_FMT_GRAY8)
imgstack_gray_copy = []
while !eof(f)
push!(imgstack_gray_copy, read(f))
end
  identical = all(imgstack_gray .== imgstack_gray_copy)
return push!(df, [preset, fs, t, identical])
end
imgstack_gray_noise = map(x -> rand(Gray{N0f8}, 1280, 720), 1:1000)
f = openvideo(createtestvideo())
imgstack = []
while !eof(f)
push!(imgstack, read(f))
end
imgstack_gray_testvid = map(x -> convert.(Gray{N0f8}, x), imgstack)
f = openvideo("videos/ladybird.mp4")
imgstack = []
while !eof(f)
push!(imgstack, read(f))
end
imgstack_gray_ladybird = map(x -> convert.(Gray{N0f8}, x), imgstack)
df_noise = DataFrame(preset = [], filesize = [], time = [], identical = [])
df_testvid = DataFrame(preset = [], filesize = [], time = [], identical = [])
df_ladybird = DataFrame(preset = [], filesize = [], time = [], identical = [])
for preset in ["ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow", "slower", "veryslow"]
@show preset
for rep in 1:3
@show rep
testvideocomp!(df_noise, preset, imgstack_gray_noise)
testvideocomp!(df_testvid, preset, imgstack_gray_testvid)
testvideocomp!(df_ladybird, preset, imgstack_gray_ladybird)
end
end
noise_raw_size = size(imgstack_gray_noise[1], 1) * size(imgstack_gray_noise[1], 2) * length(imgstack_gray_noise)
testvid_raw_size = size(imgstack_gray_testvid[1], 1) * size(imgstack_gray_testvid[1], 2) * length(imgstack_gray_testvid)
ladybird_raw_size =
size(imgstack_gray_ladybird[1], 1) * size(imgstack_gray_ladybird[1], 2) * length(imgstack_gray_ladybird)
df_noise[:filesize_perc] = 100 * (df_noise[:filesize] ./ noise_raw_size)
df_testvid[:filesize_perc] = 100 * (df_testvid[:filesize] ./ testvid_raw_size)
df_ladybird[:filesize_perc] = 100 * (df_ladybird[:filesize] ./ ladybird_raw_size)
df_noise[:fps] = length(imgstack_gray_noise) ./ df_noise[:time]
df_testvid[:fps] = length(imgstack_gray_testvid) ./ df_testvid[:time]
df_ladybird[:fps] = length(imgstack_gray_ladybird) ./ df_ladybird[:time]
using Statistics
df_noise_summary = by(
df_noise,
:preset,
identical = :identical => minimum,
fps_mean = :fps => mean,
fps_std = :fps => std,
filesize_perc_mean = :filesize_perc => mean,
filesize_perc_std = :filesize_perc => std,
)
df_testvid_summary = by(
df_testvid,
:preset,
identical = :identical => minimum,
fps_mean = :fps => mean,
fps_std = :fps => std,
filesize_perc_mean = :filesize_perc => mean,
filesize_perc_std = :filesize_perc => std,
)
df_ladybird_summary = by(
df_ladybird,
:preset,
identical = :identical => minimum,
fps_mean = :fps => mean,
fps_std = :fps => std,
filesize_perc_mean = :filesize_perc => mean,
filesize_perc_std = :filesize_perc => std,
)
@show df_noise_summary
@show df_testvid_summary
@show df_ladybird_summary
### Results (generated 2019-05-29 on a 2019 Macbook Pro)
### OUTDATED. Generated before change to VideoIO.save
#=
df_noise_summary = 9×6 DataFrame
│ Row │ preset │ identical │ fps_mean │ fps_std │ filesize_perc_mean │ filesize_perc_std │
│ │ Any │ Bool │ Float64 │ Float64 │ Float64 │ Float64 │
├─────┼───────────┼───────────┼──────────┼─────────┼────────────────────┼───────────────────┤
│ 1 │ ultrafast │ true │ 92.5769 │ 8.40224 │ 156.444 │ 0.0 │
│ 2 │ superfast │ true │ 62.3509 │ 1.19652 │ 144.019 │ 0.0 │
│ 3 │ veryfast │ true │ 59.9182 │ 1.77294 │ 144.019 │ 0.0 │
│ 4 │ faster │ true │ 60.3482 │ 2.32679 │ 144.02 │ 0.0 │
│ 5 │ fast │ true │ 149.169 │ 1.56068 │ 100.784 │ 0.0 │
│ 6 │ medium │ true │ 146.141 │ 3.41282 │ 100.784 │ 0.0 │
│ 7 │ slow │ true │ 147.214 │ 1.23929 │ 100.784 │ 0.0 │
│ 8 │ slower │ true │ 138.808 │ 2.553 │ 100.784 │ 0.0 │
│ 9 │ veryslow │ true │ 132.505 │ 3.28558 │ 100.784 │ 0.0 │
df_testvid_summary = 9×6 DataFrame
│ Row │ preset │ identical │ fps_mean │ fps_std │ filesize_perc_mean │ filesize_perc_std │
│ │ Any │ Bool │ Float64 │ Float64 │ Float64 │ Float64 │
├─────┼───────────┼───────────┼──────────┼─────────┼────────────────────┼───────────────────┤
│ 1 │ ultrafast │ true │ 228.166 │ 75.1439 │ 4.80392 │ 0.0 │
│ 2 │ superfast │ true │ 239.73 │ 54.2033 │ 3.62199 │ 0.0 │
│ 3 │ veryfast │ true │ 197.506 │ 13.1121 │ 3.59901 │ 0.0 │
│ 4 │ faster │ true │ 174.174 │ 18.0316 │ 3.60282 │ 0.0 │
│ 5 │ fast │ true │ 235.181 │ 7.40358 │ 3.44104 │ 0.0 │
│ 6 │ medium │ true │ 219.654 │ 3.27445 │ 3.40832 │ 0.0 │
│ 7 │ slow │ true │ 171.337 │ 3.92415 │ 3.33917 │ 0.0 │
│ 8 │ slower │ true │ 105.24 │ 6.59151 │ 3.25774 │ 5.43896e-16 │
│ 9 │ veryslow │ true │ 63.1136 │ 2.47291 │ 3.2219 │ 0.0 │
df_ladybird_summary = 9×6 DataFrame
│ Row │ preset │ identical │ fps_mean │ fps_std │ filesize_perc_mean │ filesize_perc_std │
│ │ Any │ Bool │ Float64 │ Float64 │ Float64 │ Float64 │
├─────┼───────────┼───────────┼──────────┼──────────┼────────────────────┼───────────────────┤
│ 1 │ ultrafast │ true │ 176.787 │ 36.5227 │ 12.2293 │ 0.0 │
│ 2 │ superfast │ true │ 135.925 │ 7.04431 │ 10.3532 │ 0.0 │
│ 3 │ veryfast │ true │ 117.115 │ 1.28102 │ 10.1954 │ 0.0 │
│ 4 │ faster │ true │ 94.39 │ 3.48494 │ 9.85604 │ 0.0 │
│ 5 │ fast │ true │ 69.657 │ 1.61004 │ 9.62724 │ 0.0 │
│ 6 │ medium │ true │ 54.9621 │ 0.568074 │ 9.51032 │ 0.0 │
│ 7 │ slow │ true │ 37.8888 │ 1.27484 │ 9.33622 │ 0.0 │
│ 8 │ slower │ true │ 20.1112 │ 1.04282 │ 9.25529 │ 0.0 │
│ 9 │ veryslow │ true │ 10.0016 │ 0.473213 │ 9.24999 │ 0.0 │
=#
# HISTOGRAM COMPARISON - useful for diagnosing range compression
# using PyPlot, ImageCore
# figure()
# hist(rawview(channelview(imgstack_gray_copy[1]))[:],0:256,label="copy")
# hist(rawview(channelview(imgstack_gray[1]))[:],0:256,label="original")
# legend()
| VideoIO | https://github.com/JuliaIO/VideoIO.jl.git |
|
[
"MIT"
] | 1.1.0 | 4aaf8a88550d628b8142ed6772901fdf9cef55fd | docs | 7189 | VideoIO v1.1.0 Release Notes
======================
## Removal
- The GLMakie-based video player that was accessed through Requires by loading GLMakie separately has been removed
after being deprecated in v1.0.8.
VideoIO v0.9 Release Notes
======================
## New features
- Add support for 10 bit gray, RGB, and YUV encoding and decoding
- Support do-block syntax for `openvideo` and `open_video_out`
- Support 444 chroma downsampling, among other pixel formats
- Simultaneous muxing and encoding by default
- Support "scanline-major" encoding
- New `VideoIO.load(filename::String; ...)` function to read entire video into memory
- Allow ffmpeg to choose default codec based on container format
## Bugfixes
- Encoding videos now encodes the correct number of frames
- Fixed seeking inaccuracies (#275, #242)
- Fix bug which caused the last two frames to be dropped while reading some videos (#270)
- Fix bug which caused the first two frames to be dropped when writing videos (#271)
- Prevent final frame in video stream from being hidden due to it having zero duration
- Fix inconsistency with color ranges and clipping of user input (#283)
- Make color encoding more similar to ffmpeg binary (#284)
- Make the first video frame occur at `pts` 0, not 1
- Make image buffer for decoding compatible with multi-planar image formats
- Eliminate use of deprecated libav functions
- Properly initialize libav structs
- Make `NO_TRANSCODE` encoding work
- Reduce multi-threaded access to libav functions that are not thread safe
- Make code generally safer against accessing data after the GC frees it
- Eliminate segfault when `VideoReader` was used with `IOStream`
- Reduce type instability
## Breaking Changes
The encoding API has been renamed and simplified:
| Original function | Equivalent function in v0.9 |
| :---------------- | :-------------------------- |
| `encode_video` | `save` |
| `prepareencoder` | `open_video_out!` |
| `appendencode!` | `write` |
| `finish_encode` | `close_video_out!` |
| `close(io)` | N/A (no longer needed) |
| `mux` | N/A (no longer needed) |
The keyword arguments of the replacement functions may no longer be the same as
the original, so please see their documentation. In particular, note that
`AVCodecContextProperties` has been replaced with `encoder_options` and
`encoder_private_options`.
### Single-shot encoding
Before:
```julia
using VideoIO
props = [:priv_data => ("crf"=>"22","preset"=>"medium")]
encodevideo("video.mp4", imgstack, framerate=30, AVCodecContextProperties=props)
```
v0.9:
```julia
using VideoIO
encoder_options = (crf=23, preset="medium")
VideoIO.save("video.mp4", imgstack, framerate=30, encoder_options=encoder_options)
```
Note that `save` is not exported.
Also note that the encoder options are now provided as a named tuple.
VideoIO will automatically attempt to route these options between the public and private ffmpeg options, so for instance
it is possible to specify lossless settings as:
```julia
VideoIO.save("video.mp4", imgstack, framerate=30,
encoder_options=(color_range=2, crf=0, preset="medium")
)
```
however the most fail-safe way would be to specify the public and private options specifically
```julia
VideoIO.save("video.mp4", imgstack, framerate=30,
encoder_options=(color_range=2),
encoder_private_options=(crf=0, preset="medium")
)
```
### Iterative encoding
Before:
```julia
framestack = map(x->rand(UInt8, 100, 100), 1:100) #vector of 2D arrays
using VideoIO
props = [:priv_data => ("crf"=>"22","preset"=>"medium")]
encoder = prepareencoder(first(framestack), framerate=24, AVCodecContextProperties=props)
open("temp.stream", "w") do io
for i in eachindex(framestack)
appendencode!(encoder, io, framestack[i], i)
end
finishencode!(encoder, io)
end
mux("temp.stream", "video.mp4", framerate) #Multiplexes the stream into a video container
```
v0.9:
```julia
framestack = map(x->rand(UInt8, 100, 100), 1:100) #vector of 2D arrays
using VideoIO
encoder_options = (crf=23, preset="medium")
open_video_out("video.mp4", first(framestack), framerate=30, encoder_options=encoder_options) do writer
for frame in framestack
write(writer, frame)
end
end
```
Note that the multiplexing (mux) is now done in parallel with the encoding loop, so no need for an intermediate
".stream" file. Lower level functions should be used for more elaborate encoding/multiplexing tasks.
### Performance improvement
The speed of encoding `UInt8` frames losslessly has more than doubled.
Also, encoding no longer has verbose printing.
For `imgstack = map(x-> rand(UInt8, 2048, 1536), 1:100)`
v0.8.4
```julia
julia> props = [:color_range=>2, :priv_data => ("crf"=>"0","preset"=>"ultrafast")];
julia> @btime encodevideo("video.mp4", imgstack, AVCodecContextProperties = props)
Progress: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| Time: 0:00:01
[ Info: Video file saved: /Users/ian/video.mp4
frame= 100 fps= 39 q=-1.0 Lsize= 480574kB time=00:00:04.12 bitrate=954382.6kbits/s speed= 1.6x 77x
[ Info: video:480572kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 0.000459%
... # hiding 6 subsequent repeated outputs
4.163 s (2205 allocations: 340.28 KiB)
"video.mp4"
```
v0.9.0
```julia
julia> @btime VideoIO.save("video.mp4", imgstack, encoder_options = (color_range=2,crf=0,preset="ultrafast"))
1.888 s (445 allocations: 7.22 KiB)
```
## Known performance "regression"
Monochrome encoding with default arguments has become a bit slower in
v0.9. This is because by default user supplied data is assumed to be full-range
(jpeg), while the default libav output range is limited range (mpeg), and
VideoIO will now scale the user data to fit in the limited destination range.
Prior to v0.9, no such automatic scaling would be done, causing the user data to
be simply clipped. While this may seem like a regression, it is actually the
consequence of fixing a bug in the previous versions of VideoIO.
To avoid this slowdown, either specify `color_range=2` in `encoder_options`, or
alternatively specify the color space of the user-supplied data to already be
limited range. Note that `color_range=2` may produce videos that are
incompatible with some video players.
If you are encoding data that is already limited range, then the simplest way to
avoid automatic scaling is to indicate that the user data is in the FFmpeg
default colorspace. This is accomplished by setting
`input_colorspace_details = VideoIO.VioColorspaceDetails()` when encoding the
video. While the FFmpeg default color range is "unknown", setting this will
also prevent automatic scaling by VideoIO. If you have further details about
your input colorspace, and your colorspace differs from Julia's default, then
create a `VioColorspaceDetails` object with the settings that correspond to your
input data's colorspace.
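As a concrete sketch (assuming `imgstack` is a vector of monochrome frames), either of the following avoids the automatic rescaling:
```julia
# Option 1: keep the output full range (may not play everywhere):
VideoIO.save("video.mp4", imgstack, encoder_options = (color_range = 2,))
# Option 2: declare the input to already be in FFmpeg's default colorspace,
# which disables the automatic scaling:
VideoIO.save("video.mp4", imgstack,
    input_colorspace_details = VideoIO.VioColorspaceDetails())
```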
In the future we hope to make this re-scaling more performant so it won't be as
noticeable.
| VideoIO | https://github.com/JuliaIO/VideoIO.jl.git |
|
[
"MIT"
] | 1.1.0 | 4aaf8a88550d628b8142ed6772901fdf9cef55fd | docs | 2630 |
# VideoIO.jl
<img align="right" width="90" src="docs/src/assets/logo.png">
*Reading and writing of video files in Julia.*
Functionality based on a dedicated build of ffmpeg via [FFMPEG.jl](https://github.com/JuliaIO/FFMPEG.jl) and the [JuliaPackaging/Yggdrasil](https://github.com/JuliaPackaging/Yggdrasil/tree/master/F/FFMPEG) cross-compiler.
**Docs**
[![][docs-stable-img]][docs-stable-url] [![][docs-dev-img]][docs-dev-url] [](https://julialang.org/slack/)
## Installation
The package can be installed with the Julia package manager.
From the Julia REPL, type `]` to enter the Pkg REPL mode and run:
```
pkg> add VideoIO
```
Or, equivalently, via the `Pkg` API:
```julia
julia> import Pkg; Pkg.add("VideoIO")
```
## Documentation
- [![][docs-stable-img]][docs-stable-url] — **documentation of the most recently tagged version.**
- [![][docs-dev-img]][docs-dev-url] — *documentation of the in-development version.*
## Project Status
The package is tested against, and being developed for, Julia `v1` on Linux, macOS, and Windows, for x86, x86_64, armv7 and armv8 (aarch64).
## Questions and Contributions
Usage questions can be posted on the [Julia Discourse forum][discourse-tag-url] under the `videoio` tag, and/or in the #video channel of the [Julia Slack](https://julialang.org/community/).
Contributions are very welcome, as are feature requests and suggestions. Please open an [issue][issues-url] if you encounter any problems.
[discourse-tag-url]: https://discourse.julialang.org/tags/videoio
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://juliaio.github.io/VideoIO.jl/latest
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://juliaio.github.io/VideoIO.jl/stable
[issues-url]: https://github.com/JuliaIO/VideoIO.jl/issues
____
| VideoIO | https://github.com/JuliaIO/VideoIO.jl.git |
|
[
"MIT"
] | 1.1.0 | 4aaf8a88550d628b8142ed6772901fdf9cef55fd | docs | 895 | # Introduction
This library provides methods for reading and writing video files.
Functionality is based on a dedicated build of ffmpeg, provided via [JuliaPackaging/Yggdrasil](https://github.com/JuliaPackaging/Yggdrasil/tree/master/F/FFMPEG)
Explore the source at [github.com/JuliaIO/VideoIO.jl](https://github.com/JuliaIO/VideoIO.jl)
### Platform Notes:
- ARM: For truly lossless reading & writing, there is a known issue on ARM that results in small precision differences when reading/writing some video files. As such, tests for frame comparison are currently skipped on ARM. Issues/PRs welcome for helping to get this fixed.
## Installation
The package can be installed with the Julia package manager.
From the Julia REPL, type `]` to enter the Pkg REPL mode and run:
```
pkg> add VideoIO
```
Or, equivalently, via the `Pkg` API:
```julia
julia> import Pkg; Pkg.add("VideoIO")
```
| VideoIO | https://github.com/JuliaIO/VideoIO.jl.git |
|
[
"MIT"
] | 1.1.0 | 4aaf8a88550d628b8142ed6772901fdf9cef55fd | docs | 1492 | # Low level functionality
## FFMPEG log level
FFMPEG's built-in logging and warning level can be read and set with
```@docs
VideoIO.loglevel!
```
```@docs
VideoIO.loglevel
```
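For example (a sketch; it assumes the `AV_LOG_*` integer constants are exposed under `VideoIO.AVUtil`, per the library mapping below):
```julia
import VideoIO
VideoIO.loglevel!(VideoIO.AVUtil.AV_LOG_ERROR) # only report errors
VideoIO.loglevel() # query the current level
```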
## FFMPEG interface
Each ffmpeg library has its own VideoIO subpackage:

```
libavcodec -> AVCodecs
libavdevice -> AVDevice
libavfilter -> AVFilters
libavformat -> AVFormat
libavutil -> AVUtil
libswscale -> SWScale
```

The following two libraries are related to ffmpeg, but currently not
exposed:

```
libswresample -> SWResample
libpostproc -> PostProc (not wrapped)
```

After importing VideoIO, you can import and use any of the subpackages directly:

```julia
import VideoIO
import SWResample # SWResample functions are now available
```
Note that much of the functionality of these subpackages is not enabled
by default, to avoid long compilation times as they load. To control
what is loaded, each library version has a file which imports that
version's modules. For example, ffmpeg's libswscale-v2 files are loaded by
`$(VideoIO_PKG_DIR)/src/ffmpeg/SWScale/v2/LIBSWSCALE.jl`.
Check these files to enable any needed functionality that isn't already
enabled. Note that you'll probably need to do this for each version of
each ffmpeg library, and that the interfaces do change somewhat from
version to version.
Note that, in general, the low-level functions are not very fun to use,
so it is good to focus initially on enabling a nice, higher-level
function for these interfaces.
| VideoIO | https://github.com/JuliaIO/VideoIO.jl.git |
|
[
"MIT"
] | 1.1.0 | 4aaf8a88550d628b8142ed6772901fdf9cef55fd | docs | 3856 | # Video Reading
Note: Reading of audio streams is not yet implemented
## Reading Video Files
VideoIO contains a simple high-level interface which allows reading of
video frames from a supported video file (or from a camera device, shown later).
The simplest form will load the entire video into memory as a vector of image arrays.
```julia
using VideoIO
VideoIO.load("video.mp4")
```
```@docs
VideoIO.load
```
Frames can be read sequentially until the end of the file:
```julia
using VideoIO
# Construct a AVInput object to access the video and audio streams in a video container
# io = VideoIO.open(video_file)
io = VideoIO.testvideo("annie_oakley") # for testing purposes
# Access the video stream in an AVInput, and return a VideoReader object:
f = VideoIO.openvideo(io) # you can also use a file name, instead of a AVInput
img = read(f)
while !eof(f)
read!(f, img)
# Do something with frames
end
close(f)
```
```@docs
VideoIO.openvideo
```
Alternatively, you can open the video stream in a file directly with
`VideoIO.openvideo(filename)`, without making an intermediate `AVInput`
object, if you only need the video.
VideoIO also provides an iterator interface for `VideoReader`, which
behaves like other mutable iterators in Julia (e.g. Channels). If iteration is
stopped early, for example with a `break` statement, then it can be resumed in
the same spot by iterating on the same `VideoReader` object. Consequently, if
you have already iterated over all the frames of a `VideoReader` object, then it
will be empty for further iteration unless its position in the video is changed
with `seek`.
```julia
using VideoIO
f = VideoIO.openvideo("video.mp4")
for img in f
# Do something with img
end
# Alternatively use collect(f) to get all of the frames
# Further iteration will show that f is now empty
@assert isempty(f)
close(f)
```
Seeking through the video can be achieved via `seek(f, seconds::Float64)` and `seekstart(f)` to return to the start.
```@docs
VideoIO.seek
```
```@docs
VideoIO.seekstart
```
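For example (a minimal sketch):
```julia
f = VideoIO.openvideo("video.mp4")
seek(f, 2.5) # jump to roughly 2.5 seconds into the video
img = read(f)
seekstart(f) # rewind to the first frame
close(f)
```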
Frames can be skipped without reading frame content via `skipframe(f)` and `skipframes(f, n)`
```@docs
VideoIO.skipframe
```
```@docs
VideoIO.skipframes
```
Total available frame count is available via `counttotalframes(f)`
```@docs
VideoIO.counttotalframes
```
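For example (a sketch, continuing with an open `VideoReader` `f`):
```julia
VideoIO.skipframes(f, 10) # advance 10 frames without converting them to images
n = VideoIO.counttotalframes(f) # total frames available in the stream
```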
!!! note
    H264 videos encoded with `crf>0` have been observed to have 4 fewer frames
    available for reading.
### Changing the target pixel format for reading
It can be helpful to be explicit in which pixel format you wish to read frames as.
Here a grayscale video is read and parsed into a `Vector(Array{UInt8}}`
```julia
f = VideoIO.openvideo(filename, target_format=VideoIO.AV_PIX_FMT_GRAY8)
while !eof(f)
img = reinterpret(UInt8, read(f))
end
close(f)
```
## Reading Camera Output
Frames can be read iteratively
```julia
using VideoIO
cam = VideoIO.opencamera()
fps = VideoIO.framerate(cam)
for i in 1:100
img = read(cam)
sleep(1/fps)
end
```
To change settings such as the frame rate or resolution of the captured frames, set the
appropriate value in the `options` positional argument.
```julia
julia> opts = VideoIO.DEFAULT_CAMERA_OPTIONS
VideoIO.AVDict with 2 entries:
"framerate" => "30"
"pixel_format" => "uyvy422"
julia> opts["framerate"] = "24"
"24"
julia> opts["video_size"] = "640x480"
"640x480"
julia> opencamera(VideoIO.DEFAULT_CAMERA_DEVICE[], VideoIO.DEFAULT_CAMERA_FORMAT[], opts)
VideoReader(...)
```
Or more simply, change the default. For example:
```julia
julia> VideoIO.DEFAULT_CAMERA_OPTIONS["video_size"] = "640x480"
julia> VideoIO.DEFAULT_CAMERA_OPTIONS["framerate"] = 30
julia> opencamera()
VideoReader(...)
```
## Video Properties & Metadata
```@docs
VideoIO.get_start_time
```
```@docs
VideoIO.get_time_duration
```
```@docs
VideoIO.get_duration
```
```@docs
VideoIO.get_number_frames
```
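For example (a sketch using the bundled `annie_oakley.ogg` test video; the commented values match VideoIO's own test suite):
```julia
using VideoIO, Dates
file = joinpath(VideoIO.TestVideos.videodir, "annie_oakley.ogg")
VideoIO.get_duration(file) # 24.2242 (seconds)
VideoIO.get_start_time(file) # DateTime(1970, 1, 1)
VideoIO.get_number_frames(file) # nothing: not stored in this container
```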
| VideoIO | https://github.com/JuliaIO/VideoIO.jl.git |
|
[
"MIT"
] | 1.1.0 | 4aaf8a88550d628b8142ed6772901fdf9cef55fd | docs | 376 | # Utilities
## Test Videos
A small number of test videos are available through VideoIO.TestVideos.
These are short videos in a variety of formats with non-restrictive
(public domain or Creative Commons) licenses.
```@docs
VideoIO.TestVideos.available
```
```@docs
VideoIO.testvideo
```
```@docs
VideoIO.TestVideos.download_all
```
```@docs
VideoIO.TestVideos.remove_all
```
| VideoIO | https://github.com/JuliaIO/VideoIO.jl.git |
|
[
"MIT"
] | 1.1.0 | 4aaf8a88550d628b8142ed6772901fdf9cef55fd | docs | 3647 | # Writing Videos
Note: Writing of audio streams is not yet implemented
## Single-step Encoding
Videos can be encoded directly from image stack using `VideoIO.save(filename::String, imgstack::Array)` where `imgstack` is an array of image arrays with identical type and size.
The entire image stack can be encoded in a single step:
```julia
import VideoIO
encoder_options = (crf=23, preset="medium")
VideoIO.save("video.mp4", imgstack, framerate=30, encoder_options=encoder_options)
```
```@docs
VideoIO.save
```
## Iterative Encoding
Alternatively, videos can be encoded iteratively within custom loops.
```julia
using VideoIO
framestack = map(x->rand(UInt8, 100, 100), 1:100) #vector of 2D arrays
encoder_options = (crf=23, preset="medium")
framerate=24
open_video_out("video.mp4", framestack[1], framerate=framerate, encoder_options=encoder_options) do writer
for frame in framestack
write(writer, frame)
end
end
```
An example saving a series of png files as a video:
```julia
using VideoIO, FileIO, ProgressMeter # FileIO provides `load` for the pngs
dir = "" #path to directory holding images
imgnames = filter(x->occursin(".png",x), readdir(dir)) # Populate list of all .pngs
intstrings = map(x->split(x,".")[1], imgnames) # Extract index from filenames
p = sortperm(parse.(Int, intstrings)) #sort files numerically
imgnames = imgnames[p]
encoder_options = (crf=23, preset="medium")
firstimg = load(joinpath(dir, imgnames[1]))
open_video_out("video.mp4", firstimg, framerate=24, encoder_options=encoder_options) do writer
@showprogress "Encoding video frames.." for i in eachindex(imgnames)
img = load(joinpath(dir, imgnames[i]))
write(writer, img)
end
end
```
```@docs
VideoIO.open_video_out
```
```@docs
Base.write(writer::VideoIO.VideoWriter, img, index::Int)
```
```@docs
VideoIO.close_video_out!
```
## Supported Colortypes
Encoding of the following image element color types currently supported:
- `UInt8`
- `Gray{N0f8}`
- `RGB{N0f8}`
## Encoder Options
The `encoder_options` keyword argument allows control over FFmpeg encoding
options. Optional fields can be found
[here](https://ffmpeg.org/ffmpeg-codecs.html#Codec-Options).
More details about options specific to h264 can be found [here](https://trac.ffmpeg.org/wiki/Encode/H.264).
Some example values for the `encoder_options` keyword argument are:
| Goal | `encoder_options` value |
|:----:|:------|
| Perceptual compression, h264 default. Best for most cases | ```(crf=23, preset="medium")``` |
| Lossless compression. Fastest, largest file size | ```(crf=0, preset="ultrafast")``` |
| Lossless compression. Slowest, smallest file size | ```(crf=0, preset="veryslow")``` |
| Direct control of bitrate and frequency of intra frames (every 10) | ```(bit_rate = 400000, gop_size = 10, max_b_frames = 1)``` |
If a hyphenated parameter is needed, it can be added using `var"param-name" = value`.
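For example, a sketch passing libx264's hyphenated `b-pyramid` private option (the option name here is illustrative; check your encoder's documentation):
```julia
encoder_private_options = (crf = 23, var"b-pyramid" = "strict")
```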
## Lossless Encoding
### Lossless RGB
If lossless encoding of `RGB{N0f8}` is required, _true_ lossless requires passing `codec_name = "libx264rgb"` to the function to avoid the lossy RGB->YUV420 conversion, as well as adding `crf=0` in `encoder_options`.
### Lossless Grayscale
If lossless encoding of `Gray{N0f8}` or `UInt8` is required, `crf=0` should be set, as well as `color_range=2` to ensure full 8-bit pixel color representation. i.e.
```(color_range=2, crf=0, preset="medium")```
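Putting this together, a sketch of a lossless grayscale save (`Gray{N0f8}` is available via the ImageCore package):
```julia
using VideoIO, ImageCore
imgstack = map(_ -> rand(Gray{N0f8}, 480, 640), 1:100)
VideoIO.save("lossless_gray.mp4", imgstack, framerate = 30,
    encoder_options = (color_range = 2, crf = 0, preset = "medium"))
```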
### Encoding Performance
See [`util/lossless_video_encoding_testing.jl`](https://github.com/JuliaIO/VideoIO.jl/blob/master/util/lossless_video_encoding_testing.jl) for testing of losslessness, speed, and compression as a function of h264 encoding preset, for 3 example videos.
| VideoIO | https://github.com/JuliaIO/VideoIO.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 512 | using VectorizationBase
using Documenter
makedocs(;
modules = [VectorizationBase],
authors = "Chris Elrod",
repo = "https://github.com/JuliaSIMD/VectorizationBase.jl/blob/{commit}{path}#L{line}",
sitename = "VectorizationBase.jl",
format = Documenter.HTML(;
prettyurls = get(ENV, "CI", "false") == "true",
canonical = "https://JuliaSIMD.github.io/VectorizationBase.jl"
),
pages = ["Home" => "index.md"],
strict = false
)
deploydocs(; repo = "github.com/JuliaSIMD/VectorizationBase.jl")
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 18666 | module VectorizationBase
if isdefined(Base, :Experimental) &&
isdefined(Base.Experimental, Symbol("@max_methods"))
@eval Base.Experimental.@max_methods 1
end
import StaticArrayInterface, LinearAlgebra, Libdl, IfElse, LayoutPointers
const ArrayInterface = StaticArrayInterface
using StaticArrayInterface:
contiguous_axis,
contiguous_axis_indicator,
contiguous_batch_size,
stride_rank,
device,
CPUPointer,
CPUIndex,
known_length,
known_first,
known_last,
static_size,
static_strides,
offsets,
static_first,
static_last,
static_length
import IfElse: ifelse
using CPUSummary:
cache_type,
num_cache,
num_cache_levels,
num_cores,
num_l1cache,
num_l2cache,
cache_associativity,
num_l3cache,
sys_threads,
cache_inclusive,
num_l4cache,
cache_linesize,
num_machines,
cache_size,
num_sockets
using HostCPUFeatures:
register_size,
static_sizeof,
fast_int64_to_double,
pick_vector_width,
pick_vector_width_shift,
prevpow2,
simd_integer_register_size,
fma_fast,
smax,
smin,
has_feature,
has_opmask_registers,
register_count,
static_sizeof,
cpu_name,
register_size,
unwrap,
intlog2,
nextpow2,
fast_half
using SIMDTypes:
Bit,
FloatingTypes,
SignedHW,
UnsignedHW,
IntegerTypesHW,
NativeTypesExceptBitandFloat16,
NativeTypesExceptBit,
NativeTypesExceptFloat16,
NativeTypes,
_Vec
using LayoutPointers:
AbstractStridedPointer,
StridedPointer,
StridedBitPointer,
memory_reference,
stridedpointer,
zstridedpointer,
similar_no_offset,
similar_with_offset,
grouped_strided_pointer,
stridedpointers,
bytestrides,
DensePointerWrapper,
zero_offsets
using Static
using Static: One, Zero, eq, ne, lt, le, gt, ge
@inline function promote(x::X, y::Y) where {X,Y}
T = promote_type(X, Y)
convert(T, x), convert(T, y)
end
@inline function promote(x::X, y::Y, z::Z) where {X,Y,Z}
T = promote_type(promote_type(X, Y), Z)
convert(T, x), convert(T, y), convert(T, z)
end
asbool(::Type{True}) = true
asbool(::Type{False}) = false
# TODO: see if `@inline` is good enough.
# @inline asvalbool(r) = Val(map(Bool, r))
# @inline asvalint(r) = Val(map(Int, r))
@generated function asvalint(r::T) where {T<:Tuple{Vararg{StaticInt}}}
t = Expr(:tuple)
for s ∈ T.parameters
push!(t.args, s.parameters[1])
end
Expr(:call, Expr(:curly, :Val, t))
end
@generated function asvalbool(r::T) where {T<:Tuple{Vararg{StaticBool}}}
t = Expr(:tuple)
for b ∈ T.parameters
push!(t.args, b === True)
end
Expr(:call, Expr(:curly, :Val, t))
end
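# e.g. asvalint((StaticInt(3), StaticInt(1))) === Val((3, 1)): the static values
# are lifted into a `Val` type parameter, usable by `@generated` functions.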
@inline val_stride_rank(A) = asvalint(stride_rank(A))
@inline val_dense_dims(A) = asvalbool(ArrayInterface.dense_dims(A))
# doesn't export `Zero` and `One` by default, as these names could conflict with an AD library
export Vec,
Mask,
EVLMask,
MM,
stridedpointer,
vload,
vstore!,
StaticInt,
True,
False,
Bit,
vbroadcast,
mask,
vfmadd,
vfmsub,
vfnmadd,
vfnmsub,
VecUnroll,
Unroll,
pick_vector_width
using Base: llvmcall, VecElement, HWReal, tail
const LLVMCALL = GlobalRef(Base, :llvmcall)
const Boolean = Union{Bit,Bool}
abstract type AbstractSIMD{W,T<:Union{<:StaticInt,NativeTypes}} <: Real end
abstract type AbstractSIMDVector{W,T} <: AbstractSIMD{W,T} end
"""
VecUnroll{N,W,T,V<:Union{NativeTypes,AbstractSIMD{W,T}}} <: AbstractSIMD{W,T}
`VecUnroll` supports optimizations when interleaving instructions across different memory storage schemes.
`VecUnroll{N,W,T} is typically a tuple of `N+1` `AbstractSIMDVector{W,T}`s. For example, a `VecUnroll{3,8,Float32}`is a collection of 4×`Vec{8,Float32}`.
# Examples
```jldoctest; setup=:(using VectorizationBase)
julia> rgbs = [
(
R = Float32(i) / 255,
G = Float32(i + 100) / 255,
B = Float32(i + 200) / 255
) for i = 0:7:49
]
8-element Vector{NamedTuple{(:R, :G, :B), Tuple{Float32, Float32, Float32}}}:
(R = 0.0, G = 0.39215687, B = 0.78431374)
(R = 0.02745098, G = 0.41960785, B = 0.8117647)
(R = 0.05490196, G = 0.44705883, B = 0.8392157)
(R = 0.08235294, G = 0.4745098, B = 0.8666667)
(R = 0.10980392, G = 0.5019608, B = 0.89411765)
(R = 0.13725491, G = 0.5294118, B = 0.92156863)
(R = 0.16470589, G = 0.5568628, B = 0.9490196)
(R = 0.19215687, G = 0.58431375, B = 0.9764706)
julia> ret = vload(
stridedpointer(reinterpret(reshape, Float32, rgbs)),
Unroll{1,1,3,2,8,zero(UInt),1}((1, 1))
)
3 x Vec{8, Float32}
Vec{8, Float32}<0.0f0, 0.02745098f0, 0.05490196f0, 0.08235294f0, 0.10980392f0, 0.13725491f0, 0.16470589f0, 0.19215687f0>
Vec{8, Float32}<0.39215687f0, 0.41960785f0, 0.44705883f0, 0.4745098f0, 0.5019608f0, 0.5294118f0, 0.5568628f0, 0.58431375f0>
Vec{8, Float32}<0.78431374f0, 0.8117647f0, 0.8392157f0, 0.8666667f0, 0.89411765f0, 0.92156863f0, 0.9490196f0, 0.9764706f0>
julia> typeof(ret)
VecUnroll{2, 8, Float32, Vec{8, Float32}}
```
While the `R`, `G`, and `B` are interleaved in `rgb`s, they have effectively been split out in `ret`
(the first contains all 8 `R` values, with `G` and `B` in the second and third, respectively).
To optimize for the user's CPU, in real code it would typically be better to use `Int(pick_vector_width(Float32))`
in place of `8` (`W`) in the `Unroll` construction.
"""
struct VecUnroll{N,W,T,V<:Union{NativeTypes,AbstractSIMD{W,T}}} <:
AbstractSIMD{W,T}
data::Tuple{V,Vararg{V,N}}
@inline (VecUnroll(
data::Tuple{V,Vararg{V,N}}
)) where {N,W,T,V<:AbstractSIMD{W,T}} = new{N,W,T,V}(data)
@inline (VecUnroll(data::Tuple{T,Vararg{T,N}})) where {N,T<:NativeTypes} =
new{N,1,T,T}(data)
# # following two definitions are for checking that you aren't accidentally creating `VecUnroll{0}`s.
# @inline (VecUnroll(data::Tuple{V,Vararg{V,N}})::VecUnroll{N,W,T,V}) where {N,W,T,V<:AbstractSIMD{W,T}} = (@assert(N > 0); new{N,W,T,V}(data))
# @inline (VecUnroll(data::Tuple{T,Vararg{T,N}})::VecUnroll{N,T,T}) where {N,T<:NativeTypes} = (@assert(N > 0); new{N,1,T,T}(data))
# @inline VecUnroll{N,W,T,V}(data::Tuple{V,Vararg{V,N}}) where {N,W,T,V<:AbstractSIMDVector{W,T}} = new{N,W,T,V}(data)
# @inline (VecUnroll(data::Tuple{V,Vararg{V,N}})::VecUnroll{N,W,T,Vec{W,T}}) where {N,W,T,V<:AbstractSIMDVector{W,T}} = new{N,W,T,V}(data)
# @inline (VecUnroll(data::Tuple{V,Vararg{V,N}})::VecUnroll{N,W,T,V}) where {N,W,T,V<:AbstractSIMDVector{W,T}} = new{N,W,T,V}(data)
end
# @inline VecUnroll(data::Tuple) = VecUnroll(promote(data))
# const AbstractSIMD{W,T} = Union{AbstractSIMDVector{W,T},VecUnroll{<:Any,W,T}}
const IntegerTypes = Union{StaticInt,IntegerTypesHW}
const VecOrScalar = Union{AbstractSIMDVector,NativeTypes}
const NativeTypesV = Union{AbstractSIMD,NativeTypes,StaticInt}
# const NativeTypesV = Union{AbstractSIMD,NativeTypes,StaticInt}
const IntegerTypesV = Union{AbstractSIMD{<:Any,<:IntegerTypes},IntegerTypesHW}
struct Vec{W,T} <: AbstractSIMDVector{W,T}
data::NTuple{W,Core.VecElement{T}}
@inline Vec{W,T}(x::NTuple{W,Core.VecElement{T}}) where {W,T<:NativeTypes} =
new{W,T}(x)
@generated function Vec(
x::Tuple{Core.VecElement{T},Vararg{Core.VecElement{T},_W}}
) where {_W,T<:NativeTypes}
W = _W + 1
# @assert W === pick_vector_width(W, T)# || W === 8
vtyp = Expr(:curly, :Vec, W, T)
Expr(:block, Expr(:meta, :inline), Expr(:(::), Expr(:call, vtyp, :x), vtyp))
end
# @inline function Vec(x::NTuple{W,<:Core.VecElement}) where {W}
# T = eltype(x)
# @assert W === pick_vector_width(W, T)
# # @assert ispow2(W) && (W ≤ max(pick_vector_width(W, T), 8))
# new{W,T}(x)
# end
end
Base.:*(::Vec, y::Zero) = y
Base.:*(x::Zero, ::Vec) = x
@inline Base.copy(v::AbstractSIMDVector) = v
@inline asvec(x::_Vec) = Vec(x)
@inline asvec(x) = x
@inline data(vu::VecUnroll) = getfield(vu, :data)
@inline unrolleddata(x) = x
@inline unrolleddata(x::VecUnroll) = getfield(x, :data)
@inline _demoteint(::Type{T}) where {T} = T
@inline _demoteint(::Type{Int64}) = Int32
@inline _demoteint(::Type{UInt64}) = UInt32
# abstract type AbstractMask{W,U<:Union{UnsignedHW,UInt128,UInt256,UInt512,UInt1024}} <: AbstractSIMDVector{W,Bit} end
abstract type AbstractMask{W,U<:Union{UnsignedHW,UInt128}} <:
AbstractSIMDVector{W,Bit} end
struct Mask{W,U} <: AbstractMask{W,U}
u::U
@inline function Mask{W,U}(u::Unsigned) where {W,U} # ignores U...
U2 = mask_type(StaticInt{W}())
new{W,U2}(u % U2)
end
end
struct EVLMask{W,U} <: AbstractMask{W,U}
u::U
evl::UInt32
@inline function EVLMask{W,U}(u::Unsigned, evl) where {W,U} # ignores U...
U2 = mask_type(StaticInt{W}())
new{W,U2}(u % U2, evl % UInt32)
end
end
const AnyMask{W} =
Union{AbstractMask{W},VecUnroll{<:Any,W,Bit,<:AbstractMask{W}}}
@inline Mask{W}(u::U) where {W,U<:Unsigned} = Mask{W,U}(u)
@inline EVLMask{W}(u::U, i) where {W,U<:Unsigned} = EVLMask{W,U}(u, i)
@inline Mask{1}(b::Bool) = b
@inline EVLMask{1}(b::Bool, i) = b
@inline Mask(m::EVLMask{W,U}) where {W,U} = Mask{W,U}(getfield(m, :u))
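# Usage sketch of mask construction (this assumes the `data(::AbstractMask)`
# accessor defined in llvm_intrin/masks.jl): the wrapped unsigned integer's low
# `W` bits select lanes, so
#     m = Mask{8}(0x0f)
#     bitstring(m) # "00001111" -- the first four of eight lanes are active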
# Const prop is good enough; added an @inferred test to make sure.
# Removed because confusion can cause more harm than good.
@inline Base.broadcastable(v::AbstractSIMDVector) = Ref(v)
Vec{W,T}(x::Vararg{NativeTypes,W}) where {W,T<:NativeTypes} =
Vec(ntuple(w -> Core.VecElement{T}(x[w]), Val{W}()))
Vec{1,T}(x::Union{Float32,Float64}) where {T<:NativeTypes} = T(x)
Vec{1,T}(
x::Union{Int8,UInt8,Int16,UInt16,Int32,UInt32,Int64,UInt64,Bool}
) where {T<:NativeTypes} = T(x)
@inline Base.length(::AbstractSIMDVector{W}) where {W} = W
@inline Base.size(::AbstractSIMDVector{W}) where {W} = (W,)
@inline Base.eltype(::AbstractSIMD{W,T}) where {W,T} = T
@inline Base.conj(v::AbstractSIMDVector) = v # so that things like dot products work.
@inline Base.adjoint(v::AbstractSIMDVector) = v # so that things like dot products work.
@inline Base.transpose(v::AbstractSIMDVector) = v # so that things like dot products work.
# Not using getindex/setindex as names to emphasize that these are generally treated as single objects, not collections.
@generated function extractelement(
v::Vec{W,T},
i::I
) where {W,I<:IntegerTypesHW,T}
typ = LLVM_TYPES[T]
instrs = """
%res = extractelement <$W x $typ> %0, i$(8sizeof(I)) %1
ret $typ %res
"""
call = :($LLVMCALL($instrs, $T, Tuple{_Vec{$W,$T},$I}, data(v), i))
Expr(:block, Expr(:meta, :inline), call)
end
@generated function insertelement(
v::Vec{W,T},
x::T,
i::I
) where {W,I<:IntegerTypesHW,T}
typ = LLVM_TYPES[T]
instrs = """
%res = insertelement <$W x $typ> %0, $typ %1, i$(8sizeof(I)) %2
ret <$W x $typ> %res
"""
call = :(Vec(
$LLVMCALL($instrs, _Vec{$W,$T}, Tuple{_Vec{$W,$T},$T,$I}, data(v), x, i)
))
Expr(:block, Expr(:meta, :inline), call)
end
@inline (v::AbstractSIMDVector)(i::IntegerTypesHW) =
extractelement(v, i - one(i))
@inline (v::AbstractSIMDVector)(i::Integer) = extractelement(v, Int(i) - 1)
Base.@propagate_inbounds (vu::VecUnroll)(i::Integer, j::Integer) =
getfield(vu, :data)[j](i)
@inline Base.Tuple(v::Vec{W}) where {W} = ntuple(v, Val{W}())
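# Usage sketch of the call-style indexing defined above:
#     v = Vec{4,Float64}(1.0, 2.0, 3.0, 4.0)
#     v(1)     # 1.0 -- one-based, lowered to `extractelement(v, 0)`
#     Tuple(v) # (1.0, 2.0, 3.0, 4.0)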
# Use with care in function signatures; try to avoid the `T` to stay clean on Test.detect_unbound_args
@inline data(v) = v
@inline data(v::Vec) = getfield(v, :data)
function Base.show(io::IO, v::AbstractSIMDVector{W,T}) where {W,T}
name = typeof(v)
print(io, "$(name)<")
for w ∈ 1:W
print(io, repr(extractelement(v, w - 1)))
w < W && print(io, ", ")
end
print(io, ">")
end
Base.bitstring(m::AbstractMask{W}) where {W} = bitstring(data(m))[end-W+1:end]
function Base.show(io::IO, m::AbstractMask{W}) where {W}
bits = data(m)
if m isa EVLMask
print(io, "EVLMask{$W,Bit}<")
else
print(io, "Mask{$W,Bit}<")
end
for w ∈ 0:W-1
print(io, (bits & 0x01) % Int)
bits >>= 0x01
w < W - 1 && print(io, ", ")
end
print(io, ">")
end
function Base.show(io::IO, vu::VecUnroll{N,W,T,V}) where {N,W,T,V}
println(io, "$(N+1) x $V")
d = data(vu)
for n = 1:N+1
show(io, d[n])
n > N || println(io)
end
end
"""
The name of the `MM` type refers to _MM registers such as `XMM`, `YMM`, and `ZMM`.
`MMX` from the original MMX SIMD instruction set is a [meaningless initialism](https://en.wikipedia.org/wiki/MMX_(instruction_set)#Naming).
The `MM{W,X}` type is used to represent SIMD indexes of width `W` with stride `X`.
"""
struct MM{W,X,I<:Union{HWReal,StaticInt}} <: AbstractSIMDVector{W,I}
i::I
@inline MM{W,X}(i::T) where {W,X,T<:Union{HWReal,StaticInt}} =
new{W,X::Int,T}(i)
end
@inline MM(i::MM{W,X}) where {W,X} = MM{W,X}(getfield(i, :i))
@inline MM{W}(i::Union{HWReal,StaticInt}) where {W} = MM{W,1}(i)
@inline MM{W}(i::Union{HWReal,StaticInt}, ::StaticInt{X}) where {W,X} =
MM{W,X}(i)
@inline data(i::MM) = getfield(i, :i)
@inline extractelement(i::MM{W,X,I}, j) where {W,X,I<:HWReal} =
getfield(i, :i) + (X % I) * (j % I)
@inline extractelement(i::MM{W,X,I}, j) where {W,X,I<:StaticInt} =
getfield(i, :i) + X * j
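# Sketch: `MM{W,X}(i)` models the index vector `i, i+X, ..., i+(W-1)*X` without
# materializing it:
#     i = MM{4}(1)         # indices 1, 2, 3, 4 (stride X = 1)
#     extractelement(i, 2) # 3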
Base.propertynames(::AbstractSIMD) = ()
function Base.getproperty(::AbstractSIMD, ::Symbol)
throw(
ErrorException(
"""
`Base.getproperty` not defined on AbstractSIMD.
If you wish to work with the data as a tuple, it is recommended to use `Tuple(v)`. Once you have an ordinary tuple, you can access
individual elements normally. Alternatively, you can index using parentheses, e.g. `v(1)` indexes the first element.
Parentheses are used instead of `getindex`/square brackets because `AbstractSIMD` objects represent a single number, and
for `x::Number`, `x[1] === x`.
If you wish to perform a reduction on the collection, the naming convention is prepending the base function with a `v`. These functions
are not overloaded, because for `x::Number`, `sum(x) === x`. Functions include `vsum`, `vprod`, `vmaximum`, `vminimum`, `vany`, and `vall`.
If you wish to define a new operation applied to the entire vector, do not define it in terms of operations on the individual elements.
This will often lead to bad code generation -- bad in terms of performance, and it will often silently produce incorrect results!
Instead, implement them in terms of existing functions defined on `::AbstractSIMD`. Please feel free to file an issue if you would like
clarification, and especially if you think the function may be useful for others and should be included in `VectorizationBase.jl`.
"""
)
)
end
"""
pause()
For use in spin-and-wait loops, like spinlocks.
"""
@inline pause() = ccall(:jl_cpu_pause, Cvoid, ())
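# Usage sketch with a hypothetical lock-like object `l` (not defined here):
#     while !trylock(l)
#         pause() # yield the core's execution resources while spinning
#     end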
include("static.jl")
include("cartesianvindex.jl")
include("early_definitions.jl")
include("promotion.jl")
include("llvm_types.jl")
include("lazymul.jl")
include("strided_pointers/stridedpointers.jl")
# include("strided_pointers/bitpointers.jl")
include("strided_pointers/cartesian_indexing.jl")
include("strided_pointers/cse_stridemultiples.jl")
include("llvm_intrin/binary_ops.jl")
include("vector_width.jl")
include("ranges.jl")
include("llvm_intrin/conversion.jl")
include("llvm_intrin/masks.jl")
include("llvm_intrin/intrin_funcs.jl")
include("llvm_intrin/memory_addr.jl")
include("llvm_intrin/unary_ops.jl")
include("llvm_intrin/vbroadcast.jl")
include("llvm_intrin/vector_ops.jl")
include("llvm_intrin/nonbroadcastingops.jl")
include("llvm_intrin/integer_fma.jl")
include("llvm_intrin/conflict.jl")
include("llvm_intrin/vfmaddsub.jl")
include("vecunroll/memory.jl")
include("vecunroll/mappedloadstore.jl")
include("vecunroll/fmap.jl")
include("base_defs.jl")
include("alignment.jl")
include("special/misc.jl")
include("special/double.jl")
include("special/exp.jl")
include("special/verf.jl")
# include("special/log.jl")
demoteint(::Type{T}, W) where {T} = False()
demoteint(::Type{UInt64}, W::StaticInt) = gt(W, pick_vector_width(UInt64))
demoteint(::Type{Int64}, W::StaticInt) = gt(W, pick_vector_width(Int64))
@generated function simd_vec(
::DemoteInt,
y::_T,
x::Vararg{_T,_W}
) where {DemoteInt<:StaticBool,_T,_W}
W = 1 + _W
T = DemoteInt === True ? _demoteint(_T) : _T
trunc = T !== _T
Wfull = nextpow2(W)
ty = LLVM_TYPES[T]
init = W == Wfull ? "undef" : "zeroinitializer"
instrs = ["%v0 = insertelement <$Wfull x $ty> $init, $ty %0, i32 0"]
Tup = Expr(:curly, :Tuple, T)
for w ∈ 1:_W
push!(
instrs,
"%v$w = insertelement <$Wfull x $ty> %v$(w-1), $ty %$w, i32 $w"
)
push!(Tup.args, T)
end
push!(instrs, "ret <$Wfull x $ty> %v$_W")
llvmc = :($LLVMCALL($(join(instrs, "\n")), _Vec{$Wfull,$T}, $Tup))
trunc ? push!(llvmc.args, :(y % $T)) : push!(llvmc.args, :y)
for w ∈ 1:_W
ref = Expr(:ref, :x, w)
trunc && (ref = Expr(:call, :%, ref, T))
push!(llvmc.args, ref)
end
meta = Expr(:meta, :inline)
if VERSION >= v"1.8.0-beta"
push!(meta.args, Expr(:purity, true, true, true, true, false))
end
quote
$meta
Vec($llvmc)
end
end
function vec_quote(demote, W, Wpow2, offset::Int = 0)
call = Expr(:call, :simd_vec, Expr(:call, demote ? :True : :False))
Wpow2 += offset
iszero(offset) && push!(call.args, :y)
foreach(
w -> push!(call.args, Expr(:call, getfield, :x, w, false)),
max(1, offset):min(W, Wpow2)-1
)
foreach(w -> push!(call.args, Expr(:call, :zero, :T)), W+1:Wpow2)
call
end
@generated function _vec(
::StaticInt{_Wpow2},
::DemoteInt,
y::T,
x::Vararg{T,_W}
) where {DemoteInt<:StaticBool,_Wpow2,_W,T<:NativeTypes}
W = _W + 1
demote = DemoteInt === True
Wpow2 = demote ? 2_Wpow2 : _Wpow2
if W ≤ Wpow2
vec_quote(demote, W, Wpow2)
else
tup = Expr(:tuple)
offset = 0
while offset < W
push!(tup.args, vec_quote(demote, W, Wpow2, offset))
offset += Wpow2
end
Expr(:call, :VecUnroll, tup)
end
end
@static if VERSION >= v"1.8.0-beta"
Base.@assume_effects total @inline function Vec(
y::T,
x::Vararg{T,_W}
) where {_W,T<:NativeTypes}
W = StaticInt{_W}() + One()
_vec(pick_vector_width(W, T), demoteint(T, W), y, x...)
end
else
@inline function Vec(y::T, x::Vararg{T,_W}) where {_W,T<:NativeTypes}
W = StaticInt{_W}() + One()
_vec(pick_vector_width(W, T), demoteint(T, W), y, x...)
end
end
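# Behavior sketch: the width is chosen by `pick_vector_width`, and `vec_quote`
# zero-pads up to it when needed, so the exact return type is hardware-dependent:
#     v = Vec(1.0, 2.0, 3.0, 4.0) # e.g. Vec{4,Float64} on an AVX2 machine
#     v(2)                        # 2.0 regardless of the chosen width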
@inline reduce_to_onevec(f::F, vu::VecUnroll) where {F} =
ArrayInterface.reduce_tup(f, data(vu))
if VERSION >= v"1.7.0" && hasfield(Method, :recursion_relation)
dont_limit = Returns(true)
for f in (vconvert, _vconvert)
for m in methods(f)
m.recursion_relation = dont_limit
end
end
end
include("precompile.jl")
_precompile_()
end # module
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
["MIT"] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 1511 | """
align(x::Union{Int,Ptr}, [n])
Return the aligned memory address, rounding `x` up by the minimum increment.
`align` assumes `n` is a power of 2.
"""
function align end
@inline align(x::Base.Integer) =
(x + Int(register_size() - One())) & Int(-register_size())
@inline align(x::StaticInt) = (x + register_size() - One()) & -register_size()
@inline align(x::Ptr{T}, arg) where {T} =
reinterpret(Ptr{T}, align(reinterpret(UInt, x), arg))
@inline align(x::Ptr{T}) where {T} =
reinterpret(Ptr{T}, align(reinterpret(UInt, x)))
@inline align(x::Union{Integer,StaticInt}, n) =
(nm1 = n - One(); (x + nm1) & -n)
@inline align(x::Union{Integer,StaticInt}, ::StaticInt{N}) where {N} =
(nm1 = N - 1; (x + nm1) & -N)
@inline align(x::Union{Integer,StaticInt}, ::Type{T}) where {T} =
align(x, register_size() ÷ static_sizeof(T))
# @generated align(::Val{L}, ::Type{T}) where {L,T} = align(L, T)
aligntrunc(x::Union{Integer,StaticInt}, n) = x & -n
aligntrunc(x::Union{Integer,StaticInt}) = aligntrunc(x, register_size())
aligntrunc(x::Union{Integer,StaticInt}, ::Type{T}) where {T} =
aligntrunc(x, register_size() ÷ sizeof(T))
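# Examples with an explicit power-of-two `n` (the default `n` is the hardware
# register size, so those results are machine-dependent):
#     align(13, 8)      # 16 -- round up to the next multiple of 8
#     aligntrunc(13, 8) # 8  -- round down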
alignment(x::Union{Integer,StaticInt}, N = 64) = reinterpret(Int, x) % N
function valloc(
N::Union{Integer,StaticInt},
::Type{T} = Float64,
a = max(register_size(), cache_linesize())
) where {T}
# We want alignment to both vector and cacheline-sized boundaries
size_T = max(1, sizeof(T))
reinterpret(
Ptr{T},
align(reinterpret(UInt, Libc.malloc(size_T * N + a - 1)), a)
)
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
["MIT"] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 15550 | const FASTDICT = Dict{Symbol,Expr}([
:(+) => :(Base.FastMath.add_fast),
:(-) => :(Base.FastMath.sub_fast),
:(*) => :(Base.FastMath.mul_fast),
:(/) => :(Base.FastMath.div_fast),
:(÷) => :(VectorizationBase.vdiv_fast), # VectorizationBase.vdiv == integer, VectorizationBase.vfdiv == float
:(%) => :(Base.FastMath.rem_fast),
:abs2 => :(Base.FastMath.abs2_fast),
:inv => :(Base.FastMath.inv_fast), # this is slower in most benchmarks
:hypot => :(Base.FastMath.hypot_fast),
:max => :(Base.FastMath.max_fast),
:min => :(Base.FastMath.min_fast),
:muladd => :(VectorizationBase.vmuladd_fast),
:fma => :(VectorizationBase.vfma_fast),
:vfmadd => :(VectorizationBase.vfmadd_fast),
:vfnmadd => :(VectorizationBase.vfnmadd_fast),
:vfmsub => :(VectorizationBase.vfmsub_fast),
:vfnmsub => :(VectorizationBase.vfnmsub_fast),
:log => :(SLEEFPirates.log_fast),
:log2 => :(SLEEFPirates.log2_fast),
:log10 => :(SLEEFPirates.log10_fast),
:(^) => :(Base.FastMath.pow_fast)
])
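# Example lookup (an ordinary `Dict` access):
#     FASTDICT[:(+)] # :(Base.FastMath.add_fast)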
for (op, f) ∈ [
(:(Base.:-), :vsub),
(:(Base.FastMath.sub_fast), :vsub_fast),
# (:(Base.FastMath.abs2_fast),:vabs2_fast),
(:(Base.inv), :vinv),
# (:(Base.FastMath.inv_fast),:vinv_fast),
(:(Base.abs), :vabs),
(:(Base.round), :vround),
(:(Base.floor), :vfloor),
(:(Base.ceil), :vceil),
(:(Base.trunc), :vtrunc),
(:(Base.unsafe_trunc), :vtrunc),
(:(Base.signed), :vsigned),
(:(Base.unsigned), :vunsigned),
(:(Base.float), :vfloat),
(:(Base.sqrt), :vsqrt),
(:(Base.leading_zeros), :vleading_zeros),
(:(Base.trailing_zeros), :vtrailing_zeros),
(:(Base.count_ones), :vcount_ones)
]
@eval begin
@inline $op(a::AbstractSIMD) = $f(a)
end
end
@inline vfloat(a::Vec{W,Float16}) where {W} = _vfloat16(a, fast_half())
@inline _vfloat16(a::Vec{W,Float16}, ::True) where {W} = a
@inline _vfloat16(a::Vec{W,Float16}, ::False) where {W} =
convert(Vec{W,Float32}, a)
@inline Base.:(~)(v::AbstractSIMD{W,T}) where {W,T<:IntegerTypesHW} =
v ⊻ vbroadcast(Val(W), -1 % T)
@inline Base.FastMath.abs2_fast(v::AbstractSIMD) = vmul_fast(v, v)
@inline no_promote(a, b) = (a, b)
for (op, f, promote) ∈ [
(:(Base.:+), :vadd, :promote),
(:(Base.FastMath.add_fast), :vadd_fast, :promote),
(:(Base.:-), :vsub, :promote),
(:(Base.FastMath.sub_fast), :vsub_fast, :promote),
(:(Base.:*), :vmul, :promote),
(:(Base.FastMath.mul_fast), :vmul_fast, :promote),
(:(Base.:/), :vfdiv, :promote_div),
(:(Base.FastMath.div_fast), :vfdiv_fast, :promote_div),
(:(Base.:%), :vrem, :promote_div),
(:(Base.FastMath.rem_fast), :vrem_fast, :promote_div),
(:(Base.:÷), :vdiv, :promote_div),
(:(Base.:<<), :vshl, :promote_div),
(:(Base.:>>), :vashr, :promote_div),
(:(Base.:>>>), :vlshr, :promote_div),
(:(Base.:&), :vand, :promote),
(:(Base.:|), :vor, :promote),
(:(Base.:⊻), :vxor, :promote),
(:(Base.max), :vmax, :no_promote),
(:(Base.min), :vmin, :no_promote),
(:(Base.FastMath.max_fast), :vmax_fast, :no_promote),
(:(Base.FastMath.min_fast), :vmin_fast, :no_promote),
# (:(Base.copysign),:vcopysign,:no_promote),
(:(Base.:(==)), :veq, :no_promote),
(:(Base.:(≠)), :vne, :no_promote),
(:(Base.:(>)), :vgt, :no_promote),
(:(Base.:(≥)), :vge, :no_promote),
(:(Base.:(<)), :vlt, :no_promote),
(:(Base.:(≤)), :vle, :no_promote)
]
@eval begin
# @inline $op(a::AbstractSIMD,b::AbstractSIMD) = ((c,d) = $promote(a,b); $f(c,d))
@inline $op(a::AbstractSIMD, b::AbstractSIMD) =
((c, d) = $promote(a, b); $f(c, d))
@inline $op(a::NativeTypes, b::AbstractSIMDVector) =
((c, d) = $promote(a, b); $f(c, d))
@inline $op(a::AbstractSIMDVector, b::NativeTypes) =
((c, d) = $promote(a, b); $f(c, d))
@inline $op(a::NativeTypes, b::VecUnroll{N,W}) where {N,W} =
((c, d) = $promote(a, b); $f(c, d))
@inline $op(a::VecUnroll{N,W}, b::NativeTypes) where {N,W} =
((c, d) = $promote(a, b); $f(c, d))
@inline $op(
a::IntegerTypesHW,
b::VecUnroll{N,W,T,MM{W,X,T}}
) where {N,W,T<:IntegerTypesHW,X} =
VecUnroll(fmap($op, a, getfield(b, :data)))
@inline $op(
a::VecUnroll{N,W,T,MM{W,X,T}},
b::IntegerTypesHW
) where {N,W,T<:IntegerTypesHW,X} =
VecUnroll(fmap($op, getfield(a, :data), b))
end
end
for op ∈ [:(Base.:(*)), :(Base.FastMath.mul_fast)]
@eval begin
@inline $op(
m::AbstractSIMD{W,B1},
v::AbstractSIMD{W,B2}
) where {W,B1<:Union{Bool,Bit},B2<:Union{Bool,Bit}} = m & v
@inline $op(
m::AbstractSIMD{W,B},
v::AbstractSIMD{W}
) where {W,B<:Union{Bool,Bit}} = ifelse(m, v, zero(v))
@inline $op(
v::AbstractSIMD{W},
m::AbstractSIMD{W,B}
) where {W,B<:Union{Bool,Bit}} = ifelse(m, v, zero(v))
end
end
# copysign needs a heavy hand to avoid ambiguities
@inline Base.copysign(a::VecUnroll, b::AbstractSIMDVector) =
VecUnroll(fmap(vcopysign, getfield(a, :data), b))
@inline Base.copysign(a::VecUnroll, b::VecUnroll) =
VecUnroll(fmap(vcopysign, getfield(a, :data), getfield(b, :data)))
@inline Base.copysign(a::AbstractSIMDVector, b::VecUnroll) =
VecUnroll(fmap(vcopysign, a, getfield(b, :data)))
@inline Base.copysign(a::AbstractSIMDVector, b::AbstractSIMDVector) =
vcopysign(a, b)
@inline Base.copysign(a::NativeTypes, b::VecUnroll{N,W}) where {N,W} =
VecUnroll(fmap(vcopysign, vbroadcast(Val{W}(), a), getfield(b, :data)))
@inline Base.copysign(a::VecUnroll{N,W}, b::Base.HWReal) where {N,W} =
VecUnroll(fmap(vcopysign, getfield(a, :data), vbroadcast(Val{W}(), b)))
@inline Base.copysign(a::IntegerTypesHW, b::AbstractSIMDVector) =
vcopysign(a, b)
@inline Base.copysign(a::AbstractSIMDVector, b::Base.HWReal) = vcopysign(a, b)
for T ∈ [:Rational, :SignedHW, :Float32, :Float64]
@eval begin
@inline function Base.copysign(
a::$T,
b::AbstractSIMDVector{W,T}
) where {W,T<:Union{Float32,Float64,SignedHW}}
v1, v2 = promote(a, b)
vcopysign(v1, v2)
end
@inline Base.copysign(
a::$T,
b::AbstractSIMDVector{W,T}
) where {W,T<:UnsignedHW} = vbroadcast(Val{W}(), abs(a))
@inline Base.copysign(a::$T, b::VecUnroll) =
VecUnroll(fmap(copysign, a, getfield(b, :data)))
end
end
for (op, f) ∈ [
(:(Base.:+), :vadd),
(:(Base.FastMath.add_fast), :vadd_fast),
(:(Base.:-), :vsub),
(:(Base.FastMath.sub_fast), :vsub_fast),
(:(Base.:*), :vmul),
(:(Base.FastMath.mul_fast), :vmul_fast)
]
@eval begin
@inline $op(m::MM, j::NativeTypes) = ($f(m, j))
@inline $op(j::NativeTypes, m::MM) = ($f(j, m))
@inline $op(m::MM, ::StaticInt{N}) where {N} = $f(m, StaticInt{N}())
@inline $op(::StaticInt{N}, m::MM) where {N} = $f(StaticInt{N}(), m)
end
end
for (op, c) ∈ [(:(>), :(&)), (:(≥), :(&)), (:(<), :(|)), (:(≤), :(|))]
@eval begin
@inline function Base.$op(
v1::AbstractSIMD{W,I},
v2::AbstractSIMD{W,U}
) where {W,I<:Signed,U<:Unsigned}
m1 = $op(v1, zero(I))
m2 = $op(v1 % U, v2)
$c(m1, m2)
end
end
end
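# Sketch of why the extra comparison against zero is needed: reinterpreting a
# negative signed lane as unsigned would make it compare as huge. For scalars,
# `-1 % UInt8 == 0xff`, so `(-1 % UInt8) > UInt8(200)` is `true`; combining with
# `-1 > 0` (`false`) via `&` restores the mathematically correct answer, and `|`
# plays the symmetric role for `<` and `≤`.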
for (f, vf) ∈ [
(:convert, :vconvert),
(:reinterpret, :vreinterpret),
(:trunc, :vtrunc),
(:unsafe_trunc, :vtrunc),
(:round, :vround),
(:floor, :vfloor),
(:ceil, :vceil)
]
@eval begin
@inline Base.$f(::Type{T}, x::NativeTypes) where {T<:AbstractSIMD} =
$vf(T, x)
@inline Base.$f(::Type{T}, v::AbstractSIMD) where {T<:NativeTypes} =
$vf(T, v)
@inline Base.$f(::Type{T}, v::AbstractSIMD) where {T<:AbstractSIMD} =
$vf(T, v)
end
end
for (f, vf) ∈ [(:(Base.rem), :vrem), (:(Base.FastMath.rem_fast), :vrem_fast)]
@eval begin
@inline $f(x::NativeTypes, ::Type{T}) where {T<:AbstractSIMD} = $vf(x, T)
@inline $f(v::AbstractSIMD, ::Type{T}) where {T<:NativeTypes} = $vf(v, T)
@inline $f(v::AbstractSIMD, ::Type{T}) where {T<:AbstractSIMD} = $vf(v, T)
end
end
# These are defined here on `Base` functions to avoid `promote`
@inline function Base.:(<<)(
v1::AbstractSIMD{W,T1},
v2::AbstractSIMD{W,T2}
) where {W,T1<:SignedHW,T2<:UnsignedHW}
convert(T1, vshl(convert(T2, v1), v2))
end
@inline function Base.:(<<)(
v1::AbstractSIMD{W,T1},
v2::AbstractSIMD{W,T2}
) where {W,T1<:UnsignedHW,T2<:SignedHW}
convert(T1, vshl(convert(T2, v1), v2))
end
@inline function Base.:(>>)(
v1::AbstractSIMD{W,T1},
v2::AbstractSIMD{W,T2}
) where {W,T1<:SignedHW,T2<:UnsignedHW}
vashr(v1, (v2 % T1))
end
@inline function Base.:(>>)(
v1::AbstractSIMD{W,T1},
v2::AbstractSIMD{W,T2}
) where {W,T1<:UnsignedHW,T2<:SignedHW}
vashr(v1, (v2 % T1))
end
@inline function Base.:(>>>)(
v1::AbstractSIMD{W,T1},
v2::AbstractSIMD{W,T2}
) where {W,T1<:SignedHW,T2<:UnsignedHW}
convert(T1, vlshr(convert(T2, v1), v2))
end
@inline function Base.:(>>>)(
v1::AbstractSIMD{W,T1},
v2::AbstractSIMD{W,T2}
) where {W,T1<:UnsignedHW,T2<:SignedHW}
convert(T2, vlshr(v1, convert(T1, v2)))
end
@inline unrolldata(x) = x
@inline unrolldata(x::VecUnroll) = getfield(x, :data)
@inline function IfElse.ifelse(
m::VecUnroll{<:Any,<:Any,Bit,<:AbstractMask},
a::Real,
b::Real
)
x, y = promote(a, b)
VecUnroll(fmap(ifelse, getfield(m, :data), unrolldata(x), unrolldata(y)))
end
@inline function IfElse.ifelse(
m::VecUnroll{<:Any,<:Any,Bool},
a::Real,
b::Real
)
x, y = promote(a, b)
VecUnroll(fmap(ifelse, getfield(m, :data), unrolldata(x), unrolldata(y)))
end
@inline function promote_except_first(a, b, c)
d, e = promote(b, c)
a, d, e
end
@inline function IfElse.ifelse(
a::AbstractMask,
b::AbstractSIMD,
c::AbstractSIMD
)
y, z = promote(b, c)
vifelse(a, y, z)
end
@inline function IfElse.ifelse(
a::Vec{<:Any,Bool},
b::AbstractSIMD,
c::AbstractSIMD
)
y, z = promote(b, c)
vifelse(tomask(a), y, z)
end
@inline function IfElse.ifelse(a::AbstractMask, b::AbstractSIMD, c::NativeTypes)
y, z = promote(b, c)
vifelse(a, y, z)
end
@inline function IfElse.ifelse(
a::Vec{<:Any,Bool},
b::AbstractSIMD,
c::NativeTypes
)
y, z = promote(b, c)
vifelse(tomask(a), y, z)
end
@inline function IfElse.ifelse(a::AbstractMask, b::NativeTypes, c::AbstractSIMD)
y, z = promote(b, c)
vifelse(a, y, z)
end
@inline function IfElse.ifelse(
a::Vec{<:Any,Bool},
b::NativeTypes,
c::AbstractSIMD
)
y, z = promote(b, c)
vifelse(tomask(a), y, z)
end
@inline function IfElse.ifelse(a::Bool, b::AbstractSIMD, c::AbstractSIMD)
y, z = promote(b, c)
vifelse(a, y, z)
end
@inline function IfElse.ifelse(a::AbstractMask, b::NativeTypes, c::NativeTypes)
y, z = promote(b, c)
vifelse(a, y, z)
end
@inline function IfElse.ifelse(
a::Vec{<:Any,Bool},
b::NativeTypes,
c::NativeTypes
)
y, z = promote(b, c)
vifelse(tomask(a), y, z)
end
@inline function IfElse.ifelse(a::Bool, b::AbstractSIMD, c::NativeTypes)
y, z = promote(b, c)
vifelse(a, y, z)
end
@inline function IfElse.ifelse(a::Bool, b::NativeTypes, c::AbstractSIMD)
y, z = promote(b, c)
vifelse(a, y, z)
end
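# Blend sketch (relying on `vifelse` from llvm_intrin/masks.jl): lanes come from
# the second argument where the mask bit is set, and from the third elsewhere:
#     a = Vec{4,Float64}(1.0, 2.0, 3.0, 4.0)
#     b = Vec{4,Float64}(5.0, 6.0, 7.0, 8.0)
#     IfElse.ifelse(Mask{4}(0x05), a, b) # lanes 1.0, 6.0, 3.0, 8.0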
for (op, f) ∈ [(:(Base.fma), :vfma), (:(Base.muladd), :vmuladd)]
@eval begin
@inline function $op(a::AbstractSIMD, b::AbstractSIMD, c::AbstractSIMD)
x, y, z = promote(a, b, c)
$f(x, y, z)
end
@inline function $op(a::AbstractSIMD, b::AbstractSIMD, c::NativeTypes)
x, y, z = promote(a, b, c)
$f(x, y, z)
end
@inline function $op(a::AbstractSIMD, b::NativeTypes, c::AbstractSIMD)
x, y, z = promote(a, b, c)
$f(x, y, z)
end
@inline function $op(a::NativeTypes, b::AbstractSIMD, c::AbstractSIMD)
x, y, z = promote(a, b, c)
$f(x, y, z)
end
@inline function $op(a::AbstractSIMD, b::NativeTypes, c::NativeTypes)
x, y, z = promote(a, b, c)
$f(x, y, z)
end
@inline function $op(a::NativeTypes, b::AbstractSIMD, c::NativeTypes)
x, y, z = promote(a, b, c)
$f(x, y, z)
end
@inline function $op(a::NativeTypes, b::NativeTypes, c::AbstractSIMD)
x, y, z = promote(a, b, c)
$f(x, y, z)
end
end
end
# TODO: remove
@inline IfElse.ifelse(
f::F,
m::AbstractSIMD{W,B},
args::Vararg{NativeTypesV,K}
) where {W,K,B<:Union{Bool,Bit},F<:Function} = vifelse(f, m, args...)
@inline IfElse.ifelse(m::Bool, a::V, b::V) where {W,V<:AbstractSIMD{W}} =
m ? a : b#vifelse(max_mask(StaticInt{W}()) & m, a, b)
for (f, add, mul) ∈ [
(:fma, :(+), :(*)),
(:muladd, :(+), :(*)),
(:vfma, :(+), :(*)),
(:vmuladd, :(+), :(*)),
(:vfma_fast, :(Base.FastMath.add_fast), :(Base.FastMath.mul_fast)),
(:vmuladd_fast, :(Base.FastMath.add_fast), :(Base.FastMath.mul_fast))
]
if (f !== :fma) && (f !== :muladd)
@eval begin
@inline function $f(a::NativeTypesV, b::NativeTypesV, c::NativeTypesV)
x, y, z = promote(a, b, c)
$f(x, y, z)
end
end
else
f = :(Base.$f)
end
@eval begin
@inline $f(a::Zero, b::NativeTypesV, c::NativeTypesV) = c
@inline $f(a::NativeTypesV, b::Zero, c::NativeTypesV) = c
@inline $f(a::Zero, b::Zero, c::NativeTypesV) = c
@inline $f(a::One, b::Zero, c::NativeTypesV) = c
@inline $f(a::Zero, b::One, c::NativeTypesV) = c
@inline $f(a::One, b::NativeTypesV, c::NativeTypesV) = $add(b, c)
@inline $f(a::NativeTypesV, b::One, c::NativeTypesV) = $add(a, c)
@inline $f(a::One, b::One, c::NativeTypesV) = $add(one(c), c)
@inline $f(a::NativeTypesV, b::NativeTypesV, c::Zero) = $mul(a, b)
@inline $f(a::Zero, b::NativeTypesV, c::Zero) = Zero()
@inline $f(a::NativeTypesV, b::Zero, c::Zero) = Zero()
@inline $f(a::Zero, b::Zero, c::Zero) = Zero()
@inline $f(a::One, b::Zero, c::Zero) = Zero()
@inline $f(a::Zero, b::One, c::Zero) = Zero()
@inline $f(a::One, b::NativeTypesV, c::Zero) = b
@inline $f(a::NativeTypesV, b::One, c::Zero) = a
@inline $f(a::One, b::One, c::Zero) = One()
@inline $f(a::AnyMask, b::NativeTypes, c::NativeTypes) =
vifelse(a, $add(b, c), c)
@inline $f(a::AnyMask, b::NativeTypes, c::AbstractSIMD{W}) where {W} =
vifelse(a, $add(b, c), c)
@inline $f(a::AnyMask, b::AbstractSIMD{W}, c::NativeTypes) where {W} =
vifelse(a, $add(b, c), c)
@inline $f(a::AnyMask, b::AbstractSIMD{W}, c::AbstractSIMD{W}) where {W} =
vifelse(a, $add(b, c), c)
@inline $f(b::NativeTypes, a::AnyMask, c::NativeTypes) =
vifelse(a, $add(b, c), c)
@inline $f(b::NativeTypes, a::AnyMask, c::AbstractSIMD{W}) where {W} =
vifelse(a, $add(b, c), c)
@inline $f(b::AbstractSIMD{W}, a::AnyMask, c::NativeTypes) where {W} =
vifelse(a, $add(b, c), c)
@inline $f(b::AbstractSIMD{W}, a::AnyMask, c::AbstractSIMD{W}) where {W} =
vifelse(a, $add(b, c), c)
@inline $f(a::AnyMask, b::AnyMask, c::NativeTypes) =
vifelse(a & b, c + one(c), c)
@inline $f(a::AnyMask, b::AnyMask, c::AbstractSIMD{W}) where {W} =
vifelse(a & b, c + one(c), c)
end
end
for f ∈ [:(Base.:(+)), :(Base.FastMath.add_fast)]
@eval begin
@inline $f(a::AnyMask, b::NativeTypes) = vifelse(a, $f(b, one(b)), b)
@inline $f(a::AnyMask, b::AbstractSIMD{W}) where {W} =
vifelse(a, $f(b, one(b)), b)
@inline $f(a::NativeTypes, b::AnyMask) = vifelse(b, $f(a, one(a)), a)
@inline $f(a::AbstractSIMD{W}, b::AnyMask) where {W} =
vifelse(b, $f(a, one(a)), a)
@inline $f(a::AnyMask, b::AnyMask) = vadd_fast(a, b)
end
end
# masks
for (vf, f) ∈ [(:vnot, :(!)), (:vnot, :(~))]
@eval begin
@inline Base.$f(m::AbstractSIMD{<:Any,<:Union{Bool,Bit}}) = $vf(m)
end
end
for f ∈ [:typemin, :typemax, :floatmin, :floatmax]
@eval @inline Base.$f(::Type{V}) where {W,T,V<:AbstractSIMD{W,T}} =
convert(V, $f(T))
end
for T ∈ keys(JULIA_TYPE_SIZE)
T === :Bit && continue
@eval @inline $T(v::AbstractSIMD) = vconvert($T, v)
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
["MIT"] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 2692 |
struct NullStep end
struct CartesianVIndex{N,T<:Tuple{Vararg{Union{Int,StaticInt,NullStep},N}}} <:
Base.AbstractCartesianIndex{N}
I::T
@inline CartesianVIndex(
I::T
) where {N,T<:Tuple{Vararg{Union{Int,StaticInt,NullStep},N}}} = new{N,T}(I)
end
Base.length(::CartesianVIndex{N}) where {N} = N
ArrayInterface.known_length(::Type{<:CartesianVIndex{N}}) where {N} = N
Base.Tuple(i::CartesianVIndex) = getfield(i, :I)
function Base.:(:)(I::CartesianVIndex{N}, J::CartesianVIndex{N}) where {N}
CartesianIndices(map((i, j) -> i:j, getfield(I, :I), getfield(J, :I)))
end
Base.@propagate_inbounds Base.getindex(I::CartesianVIndex, i) =
getfield(I, :I)[i]
_ndim(::Type{<:Base.AbstractCartesianIndex{N}}) where {N} = N
@inline gesp(
p::AbstractStridedPointer{T,N},
i::Tuple{CartesianVIndex{N}}
) where {T,N} = gesp(p, getfield(getfield(i, 1, false), :I))
# _ndim(::Type{<:AbstractArray{N}}) where {N} = N
@generated function CartesianVIndex(
I::T
) where {
T<:Tuple{
Vararg{Union{Int,StaticInt,CartesianIndex,CartesianVIndex,NullStep}}
}
}
iexpr = Expr(:tuple)
Tp = T.parameters
q = Expr(:block)
for i in eachindex(Tp)
I_i = Symbol(:I_, i)
push!(q.args, Expr(:(=), I_i, Expr(:ref, :I, i)))
Tpᵢ = Tp[i]
if Tpᵢ <: Base.AbstractCartesianIndex
for n = 1:_ndim(Tpᵢ)
push!(iexpr.args, Expr(:ref, I_i, n))
end
# elseif Tpᵢ === NullStep
# push!(iexpr.args, :(Zero()))
else
push!(iexpr.args, I_i)
end
end
push!(q.args, Expr(:call, :CartesianVIndex, iexpr))
Expr(
:block,
Expr(:meta, :inline),
Expr(
:macrocall,
Symbol("@inbounds"),
LineNumberNode(@__LINE__, Symbol(@__FILE__)),
q
)
)
end
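# Flattening sketch: nested Cartesian indices are splatted into a single index,
#     CartesianVIndex((1, StaticInt(2), CartesianIndex(3, 4)))
#     # ≡ CartesianVIndex((1, StaticInt(2), 3, 4))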
@generated function _maybestaticfirst(a::Tuple{Vararg{Any,N}}) where {N}
quote
$(Expr(:meta, :inline))
Base.Cartesian.@ntuple $N n -> maybestaticfirst(a[n])
end
end
@generated function _maybestaticlast(a::Tuple{Vararg{Any,N}}) where {N}
quote
$(Expr(:meta, :inline))
Base.Cartesian.@ntuple $N n -> maybestaticlast(a[n])
end
end
@inline maybestaticfirst(A::CartesianIndices) =
CartesianVIndex(_maybestaticfirst(A.indices))
@inline maybestaticlast(A::CartesianIndices) =
CartesianVIndex(_maybestaticlast(A.indices))
for (op, f) ∈ [(:(+), :vadd_nsw), (:(-), :vsub_nsw), (:(*), :vmul_nsw)]
@eval begin
@inline Base.$op(a::CartesianVIndex, b) =
CartesianVIndex(fmap($f, getfield(a, :I), b))
@inline Base.$op(a, b::CartesianVIndex) =
CartesianVIndex(fmap($f, a, getfield(b, :I)))
@inline Base.$op(a::CartesianVIndex, b::CartesianVIndex) =
CartesianVIndex(fmap($f, getfield(a, :I), getfield(b, :I)))
end
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
["MIT"] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 3516 | _ispow2(x::Integer) = count_ones(x) < 2
@generated _ispow2(::StaticInt{N}) where {N} =
Expr(:call, ispow2(N) ? :True : :False)
function integer_of_bytes_symbol(bytes::Int, unsigned::Bool = false)
if bytes ≥ 8
unsigned ? :UInt64 : :Int64
elseif bytes ≥ 4
unsigned ? :UInt32 : :Int32
elseif bytes ≥ 2
unsigned ? :UInt16 : :Int16
elseif bytes ≥ 1
unsigned ? :UInt8 : :Int8
else
throw("$bytes is an invalid number of bytes for integers.")
end
end
function integer_of_bytes(bytes::Int)
if bytes ≥ 8
Int64
elseif bytes ≥ 4
Int32
elseif bytes ≥ 2
Int16
elseif bytes ≥ 1
Int8
else
throw("$bytes is an invalid number of bytes for integers.")
end
end
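# Examples:
#     integer_of_bytes(4)              # Int32
#     integer_of_bytes(3)              # Int16 (largest size not exceeding 3 bytes)
#     integer_of_bytes_symbol(8, true) # :UInt64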
@generated int_type_bytes(::StaticInt{B}) where {B} = integer_of_bytes_symbol(B)
@inline int_type(::Union{Val{W},StaticInt{W}}) where {W} =
int_type_bytes(simd_integer_register_size() ÷ StaticInt{W}())
@generated function _promote_rule(
::Val{W},
::Type{T2},
::StaticInt{RS},
::StaticInt{SIRS}
) where {W,T2<:NativeTypes,RS,SIRS}
if RS ≥ sizeof(T2) * W
return :(Vec{$W,$T2})
elseif T2 <: Signed
return :(Vec{$W,$(integer_of_bytes_symbol(max(1, SIRS ÷ W), false))})
elseif T2 <: Unsigned
return :(Vec{$W,$(integer_of_bytes_symbol(max(1, SIRS ÷ W), true))})
else
return :(Vec{$W,$T2})
end
end
@inline function Base.promote_rule(
::Type{MM{W,X,I}},
::Type{T2}
) where {W,X,I,T2<:NativeTypes}
_promote_rule(Val{W}(), T2, register_size(T2), simd_integer_register_size())
end
@inline integer_preference(::StaticInt{B}) where {B} =
ifelse(ArrayInterface.ge(StaticInt{B}(), StaticInt{8}()), Int, Int32)
@inline pick_integer(::Union{StaticInt{W},Val{W}}) where {W} =
integer_preference(simd_integer_register_size() ÷ StaticInt{W}())
@inline function pick_integer(::Val{W}, ::Type{T}) where {W,T}
BT = static_sizeof(T)
BW = register_size(T) ÷ StaticInt{W}()
I = ifelse(le(BT, BW), T, int_type_bytes(smax(StaticInt{4}(), BW)))
signorunsign(I, issigned(T))
end
mask_type_symbol(W) =
if W <= 8
return :UInt8
elseif W <= 16
return :UInt16
elseif W <= 32
return :UInt32
elseif W <= 64
return :UInt64
else#if W <= 128
return :UInt128
# elseif W <= 256
# return :UInt256
# elseif W <= 512
# return :UInt512
# else#if W <= 1024
# return :UInt1024
end
mask_type(W) =
if W <= 8
return UInt8
elseif W <= 16
return UInt16
elseif W <= 32
return UInt32
elseif W <= 64
return UInt64
else#if W <= 128
return UInt128
# elseif W <= 256
# return UInt256
# elseif W <= 512
# return UInt512
# else#if W <= 1024
# return UInt1024
end
mask_type(::Union{Val{1},StaticInt{1}}) = UInt8#Bool
mask_type(::Union{Val{2},StaticInt{2}}) = UInt8
mask_type(::Union{Val{4},StaticInt{4}}) = UInt8
mask_type(::Union{Val{8},StaticInt{8}}) = UInt8
mask_type(::Union{Val{16},StaticInt{16}}) = UInt16
mask_type(::Union{Val{24},StaticInt{24}}) = UInt32
mask_type(::Union{Val{32},StaticInt{32}}) = UInt32
mask_type(::Union{Val{40},StaticInt{40}}) = UInt64
mask_type(::Union{Val{48},StaticInt{48}}) = UInt64
mask_type(::Union{Val{56},StaticInt{56}}) = UInt64
mask_type(::Union{Val{64},StaticInt{64}}) = UInt64
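# Example: the mask type is the narrowest unsigned integer with at least `W` bits:
#     mask_type(8)  # UInt8
#     mask_type(12) # UInt16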
@generated _mask_type(::StaticInt{W}) where {W} = mask_type_symbol(W)
@inline mask_type(::Type{T}) where {T} = _mask_type(pick_vector_width(T))
@inline mask_type(::Type{T}, ::Union{StaticInt{P},Val{P}}) where {T,P} =
_mask_type(pick_vector_width(StaticInt{P}(), T))
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
["MIT"] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 16825 | struct LazyMulAdd{M,O,T<:NativeTypesV} <: Number
data::T
@inline LazyMulAdd{M,O,T}(data::T) where {M,O,T<:NativeTypesV} =
new{M,O,T}(data)
end
# O for offset is kind of hard to read next to default of 0?
@inline LazyMulAdd{M,O}(
data::T
) where {M,O,T<:Union{Base.HWReal,AbstractSIMD}} = LazyMulAdd{M,O,T}(data)
@inline LazyMulAdd{M}(data::T) where {M,T<:NativeTypesV} =
LazyMulAdd{M,0,T}(data)
@inline LazyMulAdd{0,O}(data::T) where {O,T<:Union{Base.HWReal,AbstractSIMD}} =
@assert false#StaticInt{O}()
@inline LazyMulAdd{0}(data::T) where {T<:NativeTypesV} = @assert false#StaticInt{0}()
@inline LazyMulAdd(data::T, ::StaticInt{M}) where {M,T} =
LazyMulAdd{M,0,T}(data)
@inline LazyMulAdd(data::T, ::StaticInt{M}, ::StaticInt{O}) where {M,O,T} =
LazyMulAdd{M,O,T}(data)
@inline data(lm::LazyMulAdd) = data(getfield(lm, :data)) # calls data on inner for use with indexing (normally `data` only goes through one layer)
@inline _materialize(a::LazyMulAdd{M,O,I}) where {M,O,I} =
vadd_nsw(vmul_nsw(StaticInt{M}(), getfield(a, :data)), StaticInt{O}())
@inline _materialize(x) = x
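# Semantics sketch: `LazyMulAdd{M,O}(x)` stands for `M*x + O`, deferring the
# arithmetic until `_materialize` (or a `convert`) is called; this assumes the
# scalar `vmul_nsw`/`vadd_nsw` methods defined elsewhere in the package:
#     a = LazyMulAdd{3,2}(5)
#     convert(Int, a) # 17 == 3*5 + 2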
@inline Base.convert(::Type{T}, a::LazyMulAdd{M,O,I}) where {M,O,I,T<:Number} =
convert(T, _materialize(a))
@inline Base.convert(
::Type{LazyMulAdd{M,O,I}},
a::LazyMulAdd{M,O,I}
) where {M,O,I} = a
# @inline Base.convert(::Type{LazyMulAdd{M,O,I}}, a::LazyMulAdd{M}) where {M,O,I} = a
# @inline Base.convert(::Type{LazyMulAdd{M,T,I}}, a::LazyMulAdd{M,StaticInt{O},I}) where {M,O,I,T} = a
Base.promote_rule(
::Type{LazyMulAdd{M,O,I}},
::Type{T}
) where {M,O,I<:Number,T} = promote_type(I, T)
Base.promote_rule(
::Type{LazyMulAdd{M,O,MM{W,X,I}}},
::Type{T}
) where {M,O,W,X,I,T} = promote_type(MM{W,X,I}, T)
Base.promote_rule(
::Type{LazyMulAdd{M,O,Vec{W,I}}},
::Type{T}
) where {M,O,W,I,T} = promote_type(Vec{W,I}, T)
@inline lazymul(a, b) = vmul_nsw(a, b)
@inline lazymul(::StaticInt{M}, b) where {M} = LazyMulAdd{M}(b)
@inline lazymul(::StaticInt{1}, b) = b
@inline lazymul(::StaticInt{0}, b) = StaticInt{0}()
@inline lazymul(a, ::StaticInt{M}) where {M} = LazyMulAdd{M}(a)
@inline lazymul(a, ::StaticInt{1}) = a
@inline lazymul(a, ::StaticInt{0}) = StaticInt{0}()
@inline lazymul(a::LazyMulAdd, ::StaticInt{1}) = a
@inline lazymul(::StaticInt{1}, a::LazyMulAdd) = a
@inline lazymul(a::LazyMulAdd, ::StaticInt{0}) = StaticInt{0}()
@inline lazymul(::StaticInt{0}, a::LazyMulAdd) = StaticInt{0}()
# @inline lazymul(a::LazyMulAdd, ::StaticInt{0}) = StaticInt{0}()
# @inline lazymul(::StaticInt{M}, b::MM{W,X}) where {W,M,X} = LazyMulAdd{M}(MM{W}(getfield(b, :data), StaticInt{M}()*StaticInt{X}()))
# @inline lazymul(a::MM{W,X}, ::StaticInt{M}) where {W,M,X} = LazyMulAdd{M}(MM{W}(getfield(a, :data), StaticInt{M}()*StaticInt{X}()))
@inline lazymul(a::MM{W,X}, ::StaticInt{M}) where {W,M,X} =
MM{W}(vmul_nsw(StaticInt{M}(), data(a)), StaticInt{X}() * StaticInt{M}())
@inline lazymul(::StaticInt{M}, b::MM{W,X}) where {W,M,X} =
MM{W}(vmul_nsw(StaticInt{M}(), data(b)), StaticInt{X}() * StaticInt{M}())
@inline lazymul(a::MM{W,X}, ::StaticInt{1}) where {W,X} = a
@inline lazymul(::StaticInt{1}, a::MM{W,X}) where {W,X} = a
@inline lazymul(a::MM{W,X}, ::StaticInt{0}) where {W,X} = StaticInt{0}()
@inline lazymul(::StaticInt{0}, a::MM{W,X}) where {W,X} = StaticInt{0}()
@inline lazymul(::StaticInt{M}, ::StaticInt{N}) where {M,N} =
StaticInt{M}() * StaticInt{N}()
@inline lazymul(::StaticInt{M}, ::StaticInt{1}) where {M} = StaticInt{M}()
@inline lazymul(::StaticInt, ::StaticInt{0}) = StaticInt{0}()
@inline lazymul(::StaticInt{1}, ::StaticInt{M}) where {M} = StaticInt{M}()
@inline lazymul(::StaticInt{0}, ::StaticInt) = StaticInt{0}()
@inline lazymul(::StaticInt{0}, ::StaticInt{0}) = StaticInt{0}()
@inline lazymul(::StaticInt{0}, ::StaticInt{1}) = StaticInt{0}()
@inline lazymul(::StaticInt{1}, ::StaticInt{0}) = StaticInt{0}()
@inline lazymul(::StaticInt{1}, ::StaticInt{1}) = StaticInt{1}()
@inline function lazymul(a::LazyMulAdd{M,O}, ::StaticInt{N}) where {M,O,N}
LazyMulAdd(
getfield(a, :data),
StaticInt{M}() * StaticInt{N}(),
StaticInt{O}() * StaticInt{N}()
)
end
@inline function lazymul(::StaticInt{M}, b::LazyMulAdd{N,O}) where {M,O,N}
LazyMulAdd(
getfield(b, :data),
StaticInt{M}() * StaticInt{N}(),
StaticInt{O}() * StaticInt{M}()
)
end
@inline function lazymul(
a::LazyMulAdd{M,<:MM{W,X}},
::StaticInt{N}
) where {M,N,W,X}
LazyMulAdd(
MM{W}(getfield(a, :data), StaticInt{M}() * StaticInt{N}() * StaticInt{X}()),
StaticInt{M}() * StaticInt{N}()
)
end
@inline function lazymul(
::StaticInt{M},
b::LazyMulAdd{N,<:MM{W,X}}
) where {M,N,W,X}
LazyMulAdd(
MM{W}(getfield(b, :data), StaticInt{M}() * StaticInt{N}() * StaticInt{X}()),
StaticInt{M}() * StaticInt{N}()
)
end
@inline lazymul(a::LazyMulAdd{M,<:MM{W,X}}, ::StaticInt{0}) where {M,W,X} =
StaticInt{0}()
@inline lazymul(::StaticInt{0}, b::LazyMulAdd{N,<:MM{W,X}}) where {N,W,X} =
StaticInt{0}()
@inline lazymul(a::LazyMulAdd{M,<:MM{W,X}}, ::StaticInt{1}) where {M,W,X} = a
@inline lazymul(::StaticInt{1}, b::LazyMulAdd{N,<:MM{W,X}}) where {N,W,X} = b
@inline function lazymul(a::LazyMulAdd{M}, b::LazyMulAdd{N}) where {M,N}
LazyMulAdd(
vmul_nsw(getfield(a, :data), getfield(b, :data)),
StaticInt{M}() * StaticInt{N}()
)
end
vmul_nsw(::LazyMulAdd{M,O}, ::StaticInt{0}) where {M,O} = Zero()
vmul_nsw(::StaticInt{0}, ::LazyMulAdd{M,O}) where {M,O} = Zero()
vmul_nsw(a::LazyMulAdd{M,O}, ::StaticInt{1}) where {M,O} = a
vmul_nsw(::StaticInt{1}, a::LazyMulAdd{M,O}) where {M,O} = a
@inline function vmul_nsw(a::LazyMulAdd{M,O}, ::StaticInt{N}) where {M,N,O}
LazyMulAdd(
getfield(a, :data),
StaticInt{M}() * StaticInt{N}(),
StaticInt{O}() * StaticInt{N}()
)
end
@inline function vmul_nsw(::StaticInt{N}, a::LazyMulAdd{M,O}) where {M,N,O}
LazyMulAdd(
getfield(a, :data),
StaticInt{M}() * StaticInt{N}(),
StaticInt{O}() * StaticInt{N}()
)
end
@inline vmul_nsw(a::LazyMulAdd{M,0}, i::IntegerTypesHW) where {M} =
LazyMulAdd{M,0}(vmul_nsw(getfield(a, :data), i))
@inline vmul_nsw(a::LazyMulAdd{M,O}, i::IntegerTypesHW) where {M,O} =
vmul_nsw(_materialize(a), i)
@inline function Base.:(>>>)(a::LazyMulAdd{M,O}, ::StaticInt{N}) where {M,O,N}
LazyMulAdd(
getfield(a, :data),
StaticInt{M}() >>> StaticInt{N}(),
StaticInt{O}() >>> StaticInt{N}()
)
end
# The approach with `add_indices` is to make `vadd_nsw` behave well, but for
# `i` and `j` type combinations where that is difficult, we can add specific
# `add_indices` methods that increment the pointer instead.
@inline function add_indices(p::Ptr, i, j) # generic fallback
p, vadd_nsw(i, j)
end
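# For instance (sketch), with `a = LazyMulAdd{8}(i)` for some integer `i`, the
# methods below fold a static offset into the lazy expression,
#     add_indices(p, StaticInt{2}(), a) # (p, vadd_nsw(a, StaticInt{2}()))
# while a plain runtime `Integer` offset is pushed into the pointer instead.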
@inline vadd_nsw(i::LazyMulAdd, ::Zero) = i
@inline vadd_nsw(::Zero, i::LazyMulAdd) = i
# The following two definitions normally shouldn't be hit
@inline function vadd_nsw(
a::LazyMulAdd{M,O,MM{W,X,I}},
b::IntegerTypesHW
) where {M,O,W,X,I}
MM{W}(
vadd_nsw(vmul_nsw(StaticInt{M}(), data(a)), b),
StaticInt{X}() * StaticInt{M}()
)
end
@inline function vadd_nsw(
b::IntegerTypesHW,
a::LazyMulAdd{M,O,MM{W,X,I}}
) where {M,O,W,X,I}
MM{W}(
vadd_nsw(vmul_nsw(StaticInt{M}(), data(a)), b),
StaticInt{X}() * StaticInt{M}()
)
end
@inline vadd_nsw(a::LazyMulAdd{M,O}, b) where {M,O} =
vadd_nsw(_materialize(a), b)
@inline vadd_nsw(b, a::LazyMulAdd{M,O}) where {M,O} =
vadd_nsw(b, _materialize(a))
@inline vsub_nsw(a::LazyMulAdd{M,O}, b) where {M,O} =
vsub_nsw(_materialize(a), b)
@inline vsub_nsw(b, a::LazyMulAdd{M,O}) where {M,O} =
vsub_nsw(b, _materialize(a))
@inline vadd_nsw(a::LazyMulAdd{M,O,MM{W,X,I}}, ::Zero) where {M,O,W,X,I} = a
@inline vadd_nsw(::Zero, a::LazyMulAdd{M,O,MM{W,X,I}}) where {M,O,W,X,I} = a
@inline function vsub_nsw(
a::LazyMulAdd{M,O,MM{W,X,I}},
b::IntegerTypesHW
) where {M,O,W,X,I}
MM{W}(
vsub_nsw(vmul_nsw(StaticInt{M}(), data(a)), b),
StaticInt{X}() * StaticInt{M}()
)
end
@inline function vsub_nsw(
b::IntegerTypesHW,
a::LazyMulAdd{M,O,MM{W,X,I}}
) where {M,O,W,X,I}
MM{W}(
vsub_nsw(b, vmul_nsw(StaticInt{M}(), data(a))),
(StaticInt{-1}() * StaticInt{X}()) * StaticInt{M}()
)
end
# because we should hit this instead:
@inline add_indices(p::Ptr, b::Integer, a::LazyMulAdd{M,O}) where {M,O} =
(p + b, a)
@inline add_indices(p::Ptr, a::LazyMulAdd{M,O}, b::Integer) where {M,O} =
(p + b, a)
@inline add_indices(p::Ptr{Bit}, b::Integer, a::LazyMulAdd{M,O}) where {M,O} =
(p, vadd_nsw(a, b))
@inline add_indices(p::Ptr{Bit}, a::LazyMulAdd{M,O}, b::Integer) where {M,O} =
(p, vadd_nsw(a, b))
# but in the case of `VecUnroll`s, which skip the `add_indices`, it's useful to still have the former two definitions.
# However, this also forces us to write:
@inline add_indices(p::Ptr, ::StaticInt{N}, a::LazyMulAdd{M,O}) where {M,O,N} =
(p, vadd_nsw(a, StaticInt{N}()))
@inline add_indices(p::Ptr, a::LazyMulAdd{M,O}, ::StaticInt{N}) where {M,O,N} =
(p, vadd_nsw(a, StaticInt{N}()))
@inline add_indices(
p::Ptr{Bit},
::StaticInt{N},
a::LazyMulAdd{M,O}
) where {M,O,N} = (p, vadd_nsw(a, StaticInt{N}()))
@inline add_indices(
p::Ptr{Bit},
a::LazyMulAdd{M,O},
::StaticInt{N}
) where {M,O,N} = (p, vadd_nsw(a, StaticInt{N}()))
@inline function vadd_nsw(::StaticInt{N}, a::LazyMulAdd{M,O}) where {N,M,O}
LazyMulAdd(
getfield(a, :data),
StaticInt{M}(),
StaticInt{O}() + StaticInt{N}()
)
end
@inline function vadd_nsw(a::LazyMulAdd{M,O}, ::StaticInt{N}) where {N,M,O}
LazyMulAdd(
getfield(a, :data),
StaticInt{M}(),
StaticInt{O}() + StaticInt{N}()
)
end
@inline function vadd_nsw(
::StaticInt{N},
a::LazyMulAdd{M,O,MM{W,X,I}}
) where {N,M,O,W,X,I}
LazyMulAdd(
getfield(a, :data),
StaticInt{M}(),
StaticInt{O}() + StaticInt{N}()
)
end
@inline function vadd_nsw(
a::LazyMulAdd{M,O,MM{W,X,I}},
::StaticInt{N}
) where {N,M,O,W,X,I}
LazyMulAdd(
getfield(a, :data),
StaticInt{M}(),
StaticInt{O}() + StaticInt{N}()
)
end
@inline function vadd_nsw(a::LazyMulAdd{M,O}, b::LazyMulAdd{M,A}) where {M,O,A}
LazyMulAdd(
vadd_nsw(getfield(a, :data), getfield(b, :data)),
StaticInt{M}(),
StaticInt{O}() + StaticInt{A}()
)
end
@inline function vsub_nsw(::StaticInt{N}, a::LazyMulAdd{M,O}) where {N,M,O}
LazyMulAdd(
getfield(a, :data),
StaticInt{-1}() * StaticInt{M}(),
StaticInt{N}() - StaticInt{O}()
)
end
@inline vsub_nsw(::Zero, a::LazyMulAdd{M,O}) where {M,O} = StaticInt{-1}() * a
@inline function vsub_nsw(a::LazyMulAdd{M,O}, ::StaticInt{N}) where {N,M,O}
LazyMulAdd(
getfield(a, :data),
StaticInt{M}(),
StaticInt{O}() - StaticInt{N}()
)
end
@inline vsub_nsw(a::LazyMulAdd{M,O}, ::Zero) where {M,O} = a
@inline function vsub_nsw(
::StaticInt{N},
a::LazyMulAdd{M,O,MM{W,X,I}}
) where {N,M,O,W,X,I}
LazyMulAdd(
getfield(a, :data),
StaticInt{-1}() * StaticInt{M}(),
StaticInt{N}() - StaticInt{O}()
)
end
@inline vsub_nsw(::Zero, a::LazyMulAdd{M,O,MM{W,X,I}}) where {M,O,W,X,I} =
StaticInt{-1}() * a
@inline function vsub_nsw(
a::LazyMulAdd{M,O,MM{W,X,I}},
::StaticInt{N}
) where {N,M,O,W,X,I}
LazyMulAdd(
getfield(a, :data),
StaticInt{M}(),
StaticInt{O}() - StaticInt{N}()
)
end
@inline vsub_nsw(a::LazyMulAdd{M,O,MM{W,X,I}}, ::Zero) where {M,O,W,X,I} = a
@inline function vsub_nsw(a::LazyMulAdd{M,O}, b::LazyMulAdd{M,A}) where {M,O,A}
LazyMulAdd(
vsub_nsw(getfield(a, :data), getfield(b, :data)),
StaticInt{M}(),
StaticInt{O}() - StaticInt{A}()
)
end
@inline add_indices(
p::Ptr,
a::LazyMulAdd{M,O,V},
b::LazyMulAdd{N,P,J}
) where {M,O,V<:AbstractSIMDVector,N,P,J<:IntegerTypes} = (gep(p, b), a)
@inline add_indices(
p::Ptr,
b::LazyMulAdd{N,P,J},
a::LazyMulAdd{M,O,V}
) where {M,O,V<:AbstractSIMDVector,N,P,J<:IntegerTypes} = (gep(p, b), a)
@inline add_indices(
p::Ptr,
a::LazyMulAdd{M,O,V},
b::LazyMulAdd{M,P,J}
) where {M,O,V<:AbstractSIMDVector,P,J<:IntegerTypes} = (p, vadd_nsw(a, b))
@inline add_indices(
p::Ptr,
b::LazyMulAdd{M,P,J},
a::LazyMulAdd{M,O,V}
) where {M,O,V<:AbstractSIMDVector,P,J<:IntegerTypes} = (p, vadd_nsw(a, b))
@inline add_indices(
p::Ptr{Bit},
a::LazyMulAdd{M,O,V},
b::LazyMulAdd{N,P,J}
) where {M,O,V<:AbstractSIMDVector,N,P,J<:IntegerTypes} = (p, vadd_nsw(a, b))
@inline add_indices(
p::Ptr{Bit},
b::LazyMulAdd{N,P,J},
a::LazyMulAdd{M,O,V}
) where {M,O,V<:AbstractSIMDVector,N,P,J<:IntegerTypes} = (p, vadd_nsw(a, b))
@inline add_indices(
p::Ptr{Bit},
a::LazyMulAdd{M,O,V},
b::LazyMulAdd{M,P,J}
) where {M,O,V<:AbstractSIMDVector,P,J<:IntegerTypes} = (p, vadd_nsw(a, b))
@inline add_indices(
p::Ptr{Bit},
b::LazyMulAdd{M,P,J},
a::LazyMulAdd{M,O,V}
) where {M,O,V<:AbstractSIMDVector,P,J<:IntegerTypes} = (p, vadd_nsw(a, b))
@inline add_indices(
p::Ptr,
a::AbstractSIMDVector,
b::LazyMulAdd{M,O,I}
) where {M,O,I<:IntegerTypes} = (gep(p, b), a)
@inline add_indices(
p::Ptr,
b::LazyMulAdd{M,O,I},
a::AbstractSIMDVector
) where {M,O,I<:IntegerTypes} = (gep(p, b), a)
@inline add_indices(
p::Ptr{Bit},
a::AbstractSIMDVector,
b::LazyMulAdd{M,O,I}
) where {M,O,I<:IntegerTypes} = (p, vadd_nsw(a, b))
@inline add_indices(
p::Ptr{Bit},
b::LazyMulAdd{M,O,I},
a::AbstractSIMDVector
) where {M,O,I<:IntegerTypes} = (p, vadd_nsw(a, b))
@inline function add_indices(
p::Ptr,
::MM{W,X,StaticInt{A}},
a::LazyMulAdd{M,O,T}
) where {M,O,T<:IntegerTypes,A,W,X}
gep(p, a), MM{W,X}(StaticInt{A}())
end
@inline function add_indices(
p::Ptr,
a::LazyMulAdd{M,O,T},
::MM{W,X,StaticInt{A}}
) where {M,O,T<:IntegerTypes,A,W,X}
gep(p, a), MM{W,X}(StaticInt{A}())
end
@inline function add_indices(
p::Ptr{Bit},
::MM{W,X,StaticInt{A}},
a::LazyMulAdd{M,O,T}
) where {M,O,T<:IntegerTypes,A,W,X}
p, vadd_nsw(MM{W,X}(StaticInt{A}()), _materialize(a))
end
@inline function add_indices(
p::Ptr{Bit},
a::LazyMulAdd{M,O,T},
::MM{W,X,StaticInt{A}}
) where {M,O,T<:IntegerTypes,A,W,X}
p, vadd_nsw(MM{W,X}(StaticInt{A}()), _materialize(a))
end
@generated function add_indices(
p::Ptr,
a::LazyMulAdd{M,O,MM{W,X,StaticInt{I}}},
b::LazyMulAdd{N,P,J}
) where {M,O,W,X,I,N,P,J<:IntegerTypes}
d, r = divrem(M, N)
if iszero(r)
quote
$(Expr(:meta, :inline))
p,
VectorizationBase.LazyMulAdd{$N,$(I * M)}(MM{$W,$d}(getfield(b, :data)))
end
else
quote
$(Expr(:meta, :inline))
gep(p, b), a
end
end
end
@generated function add_indices(
p::Ptr{Bit},
a::LazyMulAdd{M,O,MM{W,X,StaticInt{I}}},
b::LazyMulAdd{N,P,J}
) where {M,O,W,X,I,N,P,J<:IntegerTypes}
d, r = divrem(M, N)
if iszero(r)
quote
$(Expr(:meta, :inline))
p,
VectorizationBase.LazyMulAdd{$N,$(I * M)}(MM{$W,$d}(getfield(b, :data)))
end
else
quote
$(Expr(:meta, :inline))
p, vadd_nsw(_materialize(a), _materialize(b))
end
end
end
@inline add_indices(
p::Ptr,
b::LazyMulAdd{N,P,J},
a::LazyMulAdd{M,O,MM{W,X,StaticInt{I}}}
) where {M,O,W,X,I,N,P,J<:IntegerTypes} = add_indices(p, a, b)
@inline add_indices(
p::Ptr{Bit},
b::LazyMulAdd{N,P,J},
a::LazyMulAdd{M,O,MM{W,X,StaticInt{I}}}
) where {M,O,W,X,I,N,P,J<:IntegerTypes} = add_indices(p, a, b)
@generated function vadd_nsw(
a::LazyMulAdd{M,O,MM{W,X,StaticInt{I}}},
b::LazyMulAdd{N,P,J}
) where {M,O,W,X,I,N,P,J<:IntegerTypes}
d, r = divrem(M, N)
if iszero(r)
quote
$(Expr(:meta, :inline))
VectorizationBase.LazyMulAdd{$N,$(I * M)}(MM{$W,$d}(getfield(b, :data)))
end
else
quote
$(Expr(:meta, :inline))
vadd_nsw(a, _materialize(b))
end
end
end
@inline vadd_nsw(
b::LazyMulAdd{N,P,J},
a::LazyMulAdd{M,O,MM{W,X,StaticInt{I}}}
) where {M,O,W,X,I,N,P,J<:IntegerTypes} = vadd_nsw(a, b)
@inline vadd_nsw(a::VecUnroll, b::LazyMulAdd) =
VecUnroll(fmap(vadd_nsw, getfield(a, :data), b))
@inline vadd_nsw(b::LazyMulAdd, a::VecUnroll) =
VecUnroll(fmap(vadd_nsw, b, getfield(a, :data)))
@inline vadd_nsw(a::LazyMulAdd, b::LazyMulAdd) =
vadd_nsw(_materialize(a), _materialize(b))
@inline vsub_nsw(a::LazyMulAdd, b::LazyMulAdd) =
vsub_nsw(_materialize(a), _materialize(b))
@generated function vsub_nsw(
a::LazyMulAdd{M,O,MM{W,X,StaticInt{I}}},
b::LazyMulAdd{N,P,J}
) where {M,O,W,X,I,N,P,J<:IntegerTypes}
d, r = divrem(M, N)
if iszero(r)
quote
$(Expr(:meta, :inline))
VectorizationBase.LazyMulAdd{$N,$(I * M)}(-MM{$W,$d}(getfield(b, :data)))
end
else
quote
$(Expr(:meta, :inline))
vsub_nsw(a, _materialize(b))
end
end
end
@inline vsub_nsw(
b::LazyMulAdd{N,P,J},
a::LazyMulAdd{M,O,MM{W,X,StaticInt{I}}}
) where {M,O,W,X,I,N,P,J<:IntegerTypes} = vsub_nsw(a, b)
@inline vsub_nsw(a::VecUnroll, b::LazyMulAdd) =
VecUnroll(fmap(vsub_nsw, getfield(a, :data), b))
@inline vsub_nsw(b::LazyMulAdd, a::VecUnroll) =
VecUnroll(fmap(vsub_nsw, b, getfield(a, :data)))
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
["MIT"] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 7817 |
fast_flags(fast::Bool) = fast ? "nsz arcp contract afn reassoc" : "nsz contract"
# fast_flags(fast::Bool) = fast ? "fast" : "nsz contract"
const LLVM_TYPES = IdDict{Type{<:NativeTypes},String}(
Float16 => "half",
Float32 => "float",
Float64 => "double",
Bit => "i1",
Bool => "i8",
Int8 => "i8",
UInt8 => "i8",
Int16 => "i16",
UInt16 => "i16",
Int32 => "i32",
UInt32 => "i32",
Int64 => "i64",
UInt64 => "i64"
# Int128 => "i128",
# UInt128 => "i128",
# UInt256 => "i256",
# UInt512 => "i512",
# UInt1024 => "i1024",
)
const JULIA_TYPES = IdDict{Type{<:NativeTypes},Symbol}(
Float16 => :Float16,
Float32 => :Float32,
Float64 => :Float64,
Int8 => :Int8,
Int16 => :Int16,
Int32 => :Int32,
Int64 => :Int64,
# Int128 => :Int128,
UInt8 => :UInt8,
UInt16 => :UInt16,
UInt32 => :UInt32,
UInt64 => :UInt64,
# UInt128 => :UInt128,
Bool => :Bool,
Bit => :Bit
# UInt256 => :UInt256,
# UInt512 => :UInt512,
# UInt1024 => :UInt1024,
)
const LLVM_TYPES_SYM = IdDict{Symbol,String}(
:Float16 => "half",
:Float32 => "float",
:Float64 => "double",
:Int8 => "i8",
:Int16 => "i16",
:Int32 => "i32",
:Int64 => "i64",
# :Int128 => "i128",
:UInt8 => "i8",
:UInt16 => "i16",
:UInt32 => "i32",
:UInt64 => "i64",
# :UInt128 => "i128",
:Bool => "i8",
:Bit => "i1",
:Nothing => "void"
# :UInt256 => "i256",
# :UInt512 => "i512",
# :UInt1024 => "i1024",
)
const TYPE_LOOKUP = IdDict{Symbol,Type{<:NativeTypes}}(
:Float16 => Float16,
:Float32 => Float32,
:Float64 => Float64,
:Int8 => Int8,
:Int16 => Int16,
:Int32 => Int32,
:Int64 => Int64,
# :Int128 => Int128,
:UInt8 => UInt8,
:UInt16 => UInt16,
:UInt32 => UInt32,
:UInt64 => UInt64,
# :UInt128 => UInt128,
:Bool => Bool,
:Bit => Bit
# :UInt256 => UInt256,
# :UInt512 => UInt512,
# :UInt1024 => UInt1024
)
const JULIA_TYPE_SIZE = IdDict{Symbol,Int}(
:Float16 => 2,
:Float32 => 4,
:Float64 => 8,
:Int8 => 1,
:Int16 => 2,
:Int32 => 4,
:Int64 => 8,
# :Int128 => 16,
:UInt8 => 1,
:UInt16 => 2,
:UInt32 => 4,
:UInt64 => 8,
# :UInt128 => 16,
:Bool => 1,
:Bit => 1
# :UInt256 => 32,
# :UInt512 => 64,
# :UInt1024 => 128,
)
function _get_alignment(W::Int, sym::Symbol)::Int
sym === :Bit && return 1
T = TYPE_LOOKUP[sym]
if W > 1
Base.datatype_alignment(_Vec{W,T})
else
Base.datatype_alignment(T)
end
end
const JULIAPOINTERTYPE = 'i' * string(8sizeof(Int))
vtype(W, typ::String) = (isone(abs(W)) ? typ : "<$W x $typ>")::String
vtype(W, T::DataType) = vtype(W, LLVM_TYPES[T])::String
vtype(W, T::Symbol) = vtype(W, get(LLVM_TYPES_SYM, T, T))::String
push_julia_type!(x, W, T) =
if W ≤ 1
push!(x, T)
nothing
else
push!(x, Expr(:curly, :_Vec, W, T))
nothing
end
append_julia_type!(x, Ws, Ts) =
for i ∈ eachindex(Ws)
push_julia_type!(x, Ws[i], Ts[i])
end
ptr_suffix(T) = "p0" * suffix(T)
ptr_suffix(W, T) = suffix(W, ptr_suffix(T))
suffix(W::Int, s::String) = W == -1 ? s : 'v' * string(W) * s
suffix(W::Int, T) = suffix(W, suffix(T))
suffix(::Type{Ptr{T}}) where {T} = "p0" * suffix(T)
suffix_jlsym(W::Int, s::Symbol) = suffix(W, suffix(s))
function suffix(T::Symbol)::String
if T === :Float64
"f64"
elseif T === :Float32
"f32"
else
string('i', 8JULIA_TYPE_SIZE[T])
end
end
suffix(@nospecialize(T))::String = suffix(JULIA_TYPES[T])
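# Examples of the name-mangling helpers above:
#     vtype(4, Float64)   # "<4 x double>"
#     suffix(4, Float64)  # "v4f64"
#     suffix(-1, Float32) # "f32" (`W == -1` suppresses the vector prefix)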
# Type-dependent LLVM constants
function llvmconst(T, val)::String
iszero(val) ? "zeroinitializer" : "$(LLVM_TYPES[T]) $val"
end
function llvmconst(::Type{Bool}, val)::String
Bool(val) ? "i1 1" : "zeroinitializer"
end
function llvmconst(W::Int, @nospecialize(T), val)::String
isa(val, Number) && iszero(val) && return "zeroinitializer"
typ = (LLVM_TYPES[T])::String
'<' * join(("$typ $(val)" for _ in Base.OneTo(W)), ", ") * '>'
end
function llvmconst(W::Int, ::Type{Bool}, val)::String
Bool(val) ?
'<' * join(("i1 $(Int(val))" for _ in Base.OneTo(W)), ", ") * '>' :
"zeroinitializer"
end
function llvmconst(W::Int, v::String)::String
'<' * join((v for _ in Base.OneTo(W)), ", ") * '>'
end
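# Examples:
#     llvmconst(4, Float64, 1.0) # "<double 1.0, double 1.0, double 1.0, double 1.0>"
#     llvmconst(4, Int64, 0)     # "zeroinitializer"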
# function llvmtypedconst(T, val)
# typ = LLVM_TYPES[T]
# iszero(val) && return "$typ zeroinitializer"
# "$typ $val"
# end
# function llvmtypedconst(::Type{Bool}, val)
# Bool(val) ? "i1 1" : "i1 zeroinitializer"
# end
function _llvmcall_expr(ff, WR, R, argt)
if WR ≤ 1
Expr(:call, :ccall, ff, :llvmcall, R, argt)
else
Expr(:call, :ccall, ff, :llvmcall, Expr(:curly, :_Vec, WR, R), argt)
end
end
function llvmname(op::String, WR::Int, WA, T::Symbol, TA::Symbol)
lret = LLVM_TYPES_SYM[T]
ln = WR ≤ 1 ? "llvm.$op" : "llvm.$op.$(suffix(WR,T))"
(isone(abs(WR)) || T !== TA) ? ln * '.' * suffix(maximum(WA), TA) : ln
end
function build_llvmcall_expr(op, WR, R::Symbol, WA, TA, ::Nothing = nothing)
ff = llvmname(op, WR, WA, R, first(TA))
argt = Expr(:tuple)
append_julia_type!(argt.args, WA, TA)
call = _llvmcall_expr(ff, WR, R, argt)
for n ∈ eachindex(TA)
push!(call.args, Expr(:call, :data, Symbol(:v, n)))
end
Expr(
:block,
Expr(:meta, :inline),
isone(abs(WR)) ? call : Expr(:call, :Vec, call)
)
end
function build_llvmcall_expr(op, WR, R::Symbol, WA, TA, flags::String)
lret = LLVM_TYPES_SYM[R]
lvret = vtype(WR, lret)
lop = llvmname(op, WR, WA, R, first(TA))
# instr = "$lvret $flags @$lop"
larg_types = map(vtype, WA, TA)::Vector{String}
decl = "declare $lvret @$(lop)(" * join(larg_types, ", ")::String * ')'
args_for_call = ("$T %$(n-1)" for (n, T) ∈ enumerate(larg_types))
instrs = """%res = call $flags $lvret @$(lop)($(join(args_for_call, ", ")))
ret $lvret %res"""
args = Expr(:curly, :Tuple)
append_julia_type!(args.args, WA, TA)
arg_syms = Vector{Expr}(undef, length(TA))
for n ∈ eachindex(TA)
arg_syms[n] = Expr(:call, :data, Symbol(:v, n))
end
if WR ≤ 1
llvmcall_expr(decl, instrs, R, args, lvret, larg_types, arg_syms)
else
llvmcall_expr(
decl,
instrs,
Expr(:curly, :_Vec, WR, R),
args,
lvret,
larg_types,
arg_syms
)
end
end
@static if VERSION ≥ v"1.6.0-DEV.674"
function llvmcall_expr(
decl::String,
instr::String,
ret::Union{Symbol,Expr},
args::Expr,
lret::String,
largs::Vector{String},
arg_syms::Vector,
callonly::Bool = false,
touchesmemory::Bool = false
)
mod = """
$decl
define $lret @entry($(join(largs, ", "))) alwaysinline {
top:
$instr
}
"""
# attributes #0 = { alwaysinline }
call = Expr(
:call,
LLVMCALL,
(mod::String, "entry")::Tuple{String,String},
ret,
args
)
for arg ∈ arg_syms
push!(call.args, arg)
end
call = Expr(:(::), call, ret)
if first(lret) === '<'
call = Expr(:call, :Vec, call)
end
callonly && return call
meta = if VERSION ≥ v"1.8.0-beta"
purity = if touchesmemory
Expr(:purity, false, false, true, true, false)
else
Expr(:purity, true, true, true, true, false)
end
VERSION >= v"1.9.0-DEV.1019" && push!(purity.args, true)
Expr(:meta, purity, :inline)
else
Expr(:meta, :inline)
end
Expr(:block, meta, call)
# Expr(:block, Expr(:meta, :inline), )
end
else
function llvmcall_expr(
decl::String,
instr::String,
ret::Union{Symbol,Expr},
args::Expr,
lret::String,
largs::Vector{String},
arg_syms::Vector,
callonly::Bool = false,
touchesmemory::Bool = false
)
call = Expr(:call, LLVMCALL, (decl, instr), ret, args)
foreach(arg -> push!(call.args, arg), arg_syms)
if first(lret) === '<'
call = Expr(:call, :Vec, call)
end
callonly && return call
Expr(:block, Expr(:meta, :inline), call)
end
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 3237 | function _precompile_()
ccall(:jl_generating_output, Cint, ()) == 1 || return nothing
# for T in (Bool, Int, Float32, Float64)
# for A in (Vector, Matrix)
# precompile(stridedpointer, (A{T},))
# precompile(stridedpointer, (LinearAlgebra.Adjoint{T,A{T}},))
# end
# end
precompile(
offset_ptr,
(Symbol, Symbol, Char, Int, Int, Int, Int, Int, Bool, Int)
)
precompile(
vload_quote_llvmcall_core,
(Symbol, Symbol, Symbol, Int, Int, Int, Int, Bool, Bool, Int)
)
precompile(
vstore_quote,
(Symbol, Symbol, Symbol, Int, Int, Int, Int, Bool, Bool, Bool, Bool, Int)
)
precompile(Tuple{typeof(transpose_vecunroll_quote_W_smaller),Int,Int}) # time: 0.02420761
precompile(
Tuple{
typeof(horizontal_reduce_store_expr),
Int,
Int,
NTuple{4,Int},
Symbol,
Symbol,
Bool,
Int,
Bool
}
) # time: 0.02125804
precompile(Tuple{typeof(transpose_vecunroll_quote_W_larger),Int,Int}) # time: 0.01755242
precompile(Tuple{typeof(shufflevector_instrs),Int,Type,Vector{String},Int}) # time: 0.0159487
precompile(Tuple{typeof(transpose_vecunroll_quote),Int}) # time: 0.014891806
precompile(Tuple{typeof(align),Int,Int}) # time: 0.013784537
precompile(Tuple{typeof(align),Int}) # time: 0.013609074
precompile(
Tuple{
typeof(vstore_transpose_quote),
Int64,
Int64,
Int64,
Int64,
Int64,
Int64,
Int64,
Bool,
Bool,
Bool,
Int64,
Int64,
Symbol,
UInt64,
Bool
}
) # time: 0.006213663
precompile(
Tuple{
typeof(vstore_unroll_i_quote),
Int64,
Int64,
Int64,
Bool,
Bool,
Bool,
Int64,
Bool
}
) # time: 0.002936335
precompile(
Tuple{
typeof(_shuffle_load_quote),
Symbol,
Int,
NTuple{9,Int},
Symbol,
Symbol,
Int,
Int,
Bool,
Int,
UInt
}
)
precompile(
Tuple{
typeof(_shuffle_store_quote),
Symbol,
Int,
NTuple{9,Int},
Symbol,
Symbol,
Int,
Int,
Bool,
Bool,
Bool,
Int,
Bool
}
)
precompile(Tuple{typeof(collapse_expr),Int64,Symbol,Int64}) # time: 0.003906299
# precompile(_pick_vector_width, (Type, Vararg{Type,100}))
# the `"NATIVE_PRECOMPILE_VECTORIZATIONBASE" ∈ keys(ENV)` isn't respected, seems
# like it gets precompiled anyway given that the first condition is `true`.
# if VERSION ≥ v"1.7.0-DEV.346" && "NATIVE_PRECOMPILE_VECTORIZATIONBASE" ∈ keys(ENV)
# set_features!()
# for T ∈ (Float32, Float64)
# W = pick_vector_width(T)
# precompile(>=, (Int, MM{W, 1, Int}))
# for op ∈ (-, Base.FastMath.sub_fast)
# precompile(op, (Vec{W, T}, ))
# end
# for op ∈ (+, -, *, Base.FastMath.add_fast, Base.FastMath.sub_fast, Base.FastMath.mul_fast)
# precompile(op, (Vec{W, T}, Vec{W, T}))
# end
# for op ∈ (VectorizationBase.vfmadd, VectorizationBase.vfmadd_fast)
# precompile(op, (Vec{W, T}, Vec{W, T}, Vec{W, T}))
# end
# end
# end
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 8326 | @inline Base.promote(
v1::AbstractSIMD{W,Float16},
v2::AbstractSIMD{W,Float16}
) where {W} = (convert(Float32, v1), convert(Float32, v2))
@inline Base.promote(
a::VecUnroll{N,W,T,Vec{W,T}},
b::VecUnroll{N,W,T,Vec{W,T}},
c::VecUnroll{N,W,T,Vec{W,T}}
) where {N,W,T} = (a, b, c)
ff_promote_rule(::Type{T1}, ::Type{T2}, ::Val{W}) where {T1,T2,W} =
promote_type(T1, T2)
function _ff_promote_rule(::Type{T1}, ::Type{T2}, ::Val{W}) where {T1,T2,W}
T_canon = promote_type(T1, T2)
ifelse(lt(pick_vector_width(T_canon), StaticInt{W}()), T1, T_canon)
end
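# Sketch of the intent: keep the narrower eltype when the promoted type cannot
# fill all W lanes. E.g. on hardware with 8 Float64 lanes per register,
#   _ff_promote_rule(Float32, Float64, Val(16))  # => Float32
# whereas at W = 8 it would still promote to Float64.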
function __ff_maybe_promote_int(
::True,
::Type{T},
::Type{T_canon},
::Val{W}
) where {T,T_canon,W}
ifelse(
eq(static_sizeof(T_canon), static_sizeof(T)),
T,
pick_integer(Val{W}(), T_canon)
)
end
__ff_maybe_promote_int(
::False,
::Type{T1},
::Type{T_canon},
::Val{W}
) where {T1,T_canon,W} = T_canon
function _ff_promote_rule(
::Type{T1},
::Type{T2},
::Val{W}
) where {T1<:Union{Integer,StaticInt},T2<:Union{Integer,StaticInt},W}
T_canon = promote_type(T1, T2)
__ff_maybe_promote_int(
lt(pick_vector_width(T_canon), StaticInt{W}()),
T1,
T_canon,
Val{W}()
)
end
ff_promote_rule(
::Type{T1},
::Type{T2},
::Val{W}
) where {T1<:Union{Integer,StaticInt},T2<:Union{Integer,StaticInt},W} =
_ff_promote_rule(T1, T2, Val{W}())
ff_promote_rule(
::Type{T1},
::Type{T2},
::Val{W}
) where {T1<:FloatingTypes,T2<:FloatingTypes,W} =
_ff_promote_rule(T1, T2, Val{W}())
Base.promote_rule(
::Type{V},
::Type{T2}
) where {W,T1,T2<:NativeTypes,V<:AbstractSIMDVector{W,T1}} =
Vec{W,ff_promote_rule(T1, T2, Val{W}())}
Base.promote_rule(::Type{V}, ::Type{Bool}) where {V<:AbstractMask} = V
_assemble_vec_unroll(::Val{N}, ::Type{V}) where {N,W,T,V<:AbstractSIMD{W,T}} =
VecUnroll{N,W,T,V}
_assemble_vec_unroll(::Val{N}, ::Type{T}) where {N,T<:NativeTypes} =
VecUnroll{N,1,T,T}
Base.promote_rule(
::Type{VecUnroll{N,W,T1,V}},
::Type{T2}
) where {N,W,T1,V,T2<:NativeTypes} =
_assemble_vec_unroll(Val{N}(), promote_type(V, T2))
Base.promote_rule(
::Type{VecUnroll{N,W,T,V1}},
::Type{V2}
) where {N,W,T,V1,T2,V2<:AbstractSIMDVector{W,T2}} =
_assemble_vec_unroll(Val{N}(), promote_type(V1, V2))
# Base.promote_rule(::Type{VecUnroll{N,W,T,V1}}, ::Type{V2}) where {N,W,T,V1,V2<:AbstractSIMDVector{W}} = _assemble_vec_unroll(Val{N}(), promote_type(V1,V2))
Base.promote_rule(
::Type{VecUnroll{N,W,T,V1}},
::Type{V2}
) where {N,W,T,V1,V2<:AbstractMask{W}} =
_assemble_vec_unroll(Val{N}(), promote_type(V1, V2))
Base.promote_rule(
::Type{VecUnroll{N,W,T1,V1}},
::Type{VecUnroll{N,W,T2,V2}}
) where {N,W,T1,T2,V1,V2} = _assemble_vec_unroll(Val{N}(), promote_type(V1, V2))
Base.promote_rule(
::Type{VecUnroll{N,1,T1,T1}},
::Type{VecUnroll{N,1,T2,T2}}
) where {N,T1,T2} = promote_rule(T1, T2)
Base.promote_rule(
::Type{VecUnroll{N,W,T1,V1}},
::Type{VecUnroll{N,1,T2,T2}}
) where {N,W,T1,T2,V1} = _assemble_vec_unroll(Val{N}(), promote_type(V1, T2))
Base.promote_rule(
::Type{VecUnroll{N,1,T1,T1}},
::Type{VecUnroll{N,W,T2,V2}}
) where {N,W,T1,T2,V2} = _assemble_vec_unroll(Val{N}(), promote_type(T1, V2))
# Base.promote_rule(::Type{VecUnroll{N,1,T1,T1}}, ::Type{VecUnroll{N,1,T2,T2}}) where {N,T1,T2} = _assemble_vec_unroll(Val{N}(), promote_type(T1,T2))
Base.promote_rule(::Type{Mask{W,U}}, ::Type{EVLMask{W,U}}) where {W,U} =
Mask{W,U}
Base.promote_rule(::Type{EVLMask{W,U}}, ::Type{Mask{W,U}}) where {W,U} =
Mask{W,U}
Base.promote_rule(::Type{Bit}, ::Type{T}) where {T<:Number} = T
Base.promote_rule(
::Type{V},
::Type{T}
) where {W,TV,V<:AbstractSIMD{W,TV},T<:Rational} =
promote_type(V, promote_type(TV, T))
issigned(x) = issigned(typeof(x))
issigned(::Type{<:Signed}) = True()
issigned(::Type{<:Unsigned}) = False()
issigned(::Type{<:AbstractSIMD{<:Any,T}}) where {T} = issigned(T)
issigned(::Type{T}) where {T} = nothing
"""
Promote, favoring <:Signed or <:Unsigned of first arg.
"""
@inline promote_div(
x::Union{Integer,StaticInt,AbstractSIMD{<:Any,<:Union{Integer,StaticInt}}},
y::Union{Integer,StaticInt,AbstractSIMD{<:Any,<:Union{Integer,StaticInt}}}
) = promote_div(x, y, issigned(x))
@inline promote_div(x, y) = promote(x, y)
@inline promote_div(x, y, ::Nothing) = promote(x, y) # for Union{Integer,StaticInt}s that are neither Signed or Unsigned, e.g. Bool
@inline function promote_div(x::T1, y::T2, ::True) where {T1,T2}
T = promote_type(T1, T2)
signed(x % T), signed(y % T)
end
@inline function promote_div(x::T1, y::T2, ::False) where {T1,T2}
T = promote_type(T1, T2)
unsigned(x % T), unsigned(y % T)
end
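# Sketch of the behavior (note `promote_type(Int32, UInt32) == UInt32`):
#   promote_div(Int32(-4), UInt32(3))  # => (Int32(-4), Int32(3)); first arg Signed
#   promote_div(UInt32(4), Int32(3))   # => (UInt32(4), UInt32(3)); first arg Unsigned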
itosize(i::Union{I,AbstractSIMD{<:Any,I}}, ::Type{J}) where {I,J} =
signorunsign(i % J, issigned(I))
signorunsign(i, ::True) = signed(i)
signorunsign(i, ::False) = unsigned(i)
# Base.promote_rule(::Type{VecTile{M,N,W,T1}}, ::Type{T2}) where {M,N,W,T1,T2<:NativeTypes} = VecTile{M,N,W,promote_rule(T1,T2)}
# Base.promote_rule(::Type{VecTile{M,N,W,T1}}, ::Type{Vec{W,T2}}) where {M,N,W,T1,T2} = VecTile{M,N,W,promote_rule(T1,T2)}
# Base.promote_rule(::Type{VecTile{M,N,W,T1}}, ::Type{VecUnroll{M,W,T2}}) where {M,N,W,T1,T2} = VecTile{M,N,W,promote_rule(T1,T2)}
# Base.promote_rule(::Type{VecTile{M,N,W,T1}}, ::Type{VecTile{M,N,W,T2}}) where {M,N,W,T1,T2} = VecTile{M,N,W,promote_rule(T1,T2)}
@generated function _ff_promote_rule(
::Type{T1},
::Type{T2},
::Val{W},
::StaticInt{RS}
) where {T1<:IntegerTypes,T2<:FloatingTypes,W,RS}
T_canon = promote_type(T1, T2)
(sizeof(T_canon) * W ≤ RS) && return T_canon
@assert sizeof(T1) * W ≤ RS
@assert sizeof(T1) == 4
Float32
end
@inline function ff_promote_rule(
::Type{T1},
::Type{T2},
::Val{W}
) where {T1<:IntegerTypes,T2<:FloatingTypes,W}
_ff_promote_rule(T1, T2, Val{W}(), register_size())
end
@generated function _promote_rule(
::Type{V1},
::Type{V2},
::StaticInt{RS}
) where {W,T1,T2,V1<:AbstractSIMDVector{W,T1},V2<:AbstractSIMDVector{W,T2},RS}
T = if T1 <: StaticInt
if T2 <: StaticInt
Int
else
T2
end
elseif T2 <: StaticInt
T1
else
promote_type(T1, T2) # `T1` and `T2` should be defined in `Base`
end
if RS ≥ W * sizeof(T)
return :(Vec{$W,$T})
end
if T === Float64 || T === Float32
N = (sizeof(T) * W) ÷ RS
Wnew, r = divrem(W, N)
@assert iszero(r)
return :(VecUnroll{$(N - 1),$Wnew,$T,Vec{$Wnew,$T}})
# Should we demote `Float64` -> `Float32`?
# return :(Vec{$W,$T})
# don't demote to smaller than `Float32`
# return :(Vec{$W,Float32})
end
# They're both integers
V1MM = V1 <: MM
V2MM = V2 <: MM
if V1MM ⊻ V2MM
V1MM ? :(Vec{$W,$T2}) : :(Vec{$W,$T1})
else # either both are `MM` or neither are
B = W ÷ sizeof(T)
if !V1MM # if neither are
B = max(4, B)
end
I = integer_of_bytes_symbol(B, T <: Unsigned)
:(Vec{$W,$I})
end
end
@inline function Base.promote_rule(
::Type{V1},
::Type{V2}
) where {W,T1,T2,V1<:AbstractSIMDVector{W,T1},V2<:AbstractSIMDVector{W,T2}}
_promote_rule(V1, V2, register_size(promote_type(T1, T2)))
end
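# E.g. (a sketch; integer results depend on the register size at runtime):
#   promote_type(Vec{4,Float32}, Vec{4,Float64})  # => Vec{4,Float64}
#   promote_type(Vec{4,Int32}, Vec{4,Float64})    # => Vec{4,Float64}, provided
#                                                 # four doubles fit one register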
maybethrow(::True) = throw(ArgumentError("The arguments were invalid."))
maybethrow(::False) = nothing
# not @generated, because these methods call `promote_type` on vector types
@inline function Base.promote_rule(
::Type{VecUnroll{Nm1,Wsplit,T,V1}},
::Type{V2}
) where {Nm1,Wsplit,T,V1,T2,W,V2<:AbstractSIMDVector{W,T2}}
maybethrow(
ArrayInterface.ne(
StaticInt{Nm1}() * StaticInt{Wsplit}() + StaticInt{Wsplit}(),
StaticInt{W}()
)
)
V3 = Vec{Wsplit,T2}
_assemble_vec_unroll(Val{Nm1}(), promote_type(V1, V3))
end
@inline function Base.promote_rule(
::Type{VecUnroll{Nm1,Wsplit,T,V1}},
::Type{V2}
) where {Nm1,Wsplit,T,V1,W,V2<:AbstractMask{W}}
maybethrow(
ArrayInterface.ne(
StaticInt{Nm1}() * StaticInt{Wsplit}() + StaticInt{Wsplit}(),
StaticInt{W}()
)
)
V3 = Mask{Wsplit,mask_type(StaticInt{Wsplit}())}
_assemble_vec_unroll(Val{Nm1}(), promote_type(V1, V3))
end
@inline function Base.promote_rule(
::Type{VecUnroll{Nm1,1,T,T}},
::Type{V2}
) where {Nm1,T,T2,W,V2<:AbstractSIMDVector{W,T2}}
_assemble_vec_unroll(Val{Nm1}(), promote_type(T, V2))
end
@inline function Base.promote_rule(
::Type{VecUnroll{Nm1,1,T,T}},
::Type{V2}
) where {Nm1,T,W,V2<:AbstractMask{W}}
_assemble_vec_unroll(Val{Nm1}(), promote_type(T, V2))
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 17319 |
@generated function _vrange(
::Val{W},
::Type{T},
::Val{O},
::Val{F}
) where {W,T,O,F}
t = Expr(:tuple)
foreach(
w -> push!(t.args, Expr(:call, :(Core.VecElement), T(F * w + O))),
0:W-1
)
Expr(:block, Expr(:meta, :inline), Expr(:call, :Vec, t))
end
@inline function vrange(::Val{W}, ::Type{T}, ::Val{O}, ::Val{F}) where {W,T,O,F}
_vrange(Val{W}(), pick_integer(Val{W}(), T), Val{O}(), Val{F}())
end
function pick_integer_bytes(
W::Int,
preferred::Int,
sirs::Int,
minbytes::Int = min(preferred, 4)
)
# SIMD quadword integer support requires AVX512DQ
# preferred = AVX512DQ ? preferred : min(4, preferred)
max(minbytes, min(preferred, prevpow2(sirs ÷ W)))
end
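# For instance, with a 32-byte SIMD integer register (sirs = 32):
#   pick_integer_bytes(8, 8, 32)  # => 4 (8 lanes of i64 don't fit; use i32)
#   pick_integer_bytes(4, 8, 32)  # => 8 (4 lanes of i64 fit)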
"""
vrange(::Val{W}, i::I, ::Val{O}, ::Val{F})
W - Vector width
i::I - dynamic offset
O - static offset
F - static multiplicative factor
"""
@generated function _vrangeincr(
::Val{W},
i::I,
::Val{O},
::Val{F},
::StaticInt{SIRS}
) where {W,I<:Union{Integer,StaticInt},O,F,SIRS}
isone(W) &&
return Expr(:block, Expr(:meta, :inline), :(Base.add_int(i, $(O % I))))
bytes = pick_integer_bytes(W, sizeof(I), SIRS)
bits = 8bytes
jtypesym = Symbol(I <: Signed ? :Int : :UInt, bits)
iexpr = bytes == sizeof(I) ? :i : Expr(:call, :%, :i, jtypesym)
typ = "i$(bits)"
vtyp = vtype(W, typ)
rangevec = join(("$typ $(F*w + O)" for w ∈ 0:W-1), ", ")
instrs = """
%ie = insertelement $vtyp undef, $typ %0, i32 0
%v = shufflevector $vtyp %ie, $vtyp undef, <$W x i32> zeroinitializer
%res = add nsw $vtyp %v, <$rangevec>
ret $vtyp %res
"""
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($instrs, _Vec{$W,$jtypesym}, Tuple{$jtypesym}, $iexpr))
end
end
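# Sketch of the generated code: for W = 4, i = 10, O = 0, F = 2 it broadcasts
# `i` and adds the constant vector <0, 2, 4, 6>, so that
#   vrangeincr(Val(4), 10, Val(0), Val(2))  # ≈ Vec(10, 12, 14, 16)
# (the element type may be narrowed per `pick_integer_bytes` above).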
@inline function vrangeincr(
::Val{W},
i::I,
::Val{O},
::Val{F}
) where {W,I<:Union{Integer,StaticInt},O,F}
_vrangeincr(Val{W}(), i, Val{O}(), Val{F}(), simd_integer_register_size())
end
@generated function vrangeincr(
::Val{W},
i::T,
::Val{O},
::Val{F}
) where {W,T<:FloatingTypes,O,F}
isone(W) && return Expr(
:block,
Expr(:meta, :inline),
:(Base.add_float_fast(i, $(T(O))))
)
typ = LLVM_TYPES[T]
vtyp = vtype(W, typ)
rangevec = join(("$typ $(F*w+O).0" for w ∈ 0:W-1), ", ")
instrs = """
%ie = insertelement $vtyp undef, $typ %0, i32 0
%v = shufflevector $vtyp %ie, $vtyp undef, <$W x i32> zeroinitializer
%res = fadd fast $vtyp %v, <$rangevec>
ret $vtyp %res
"""
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($instrs, _Vec{$W,$T}, Tuple{$T}, i))
end
end
# @generated function vrangemul(::Val{W}, i::I, ::Val{O}, ::Val{F}) where {W,I<:Integer,O,F}
# isone(W) && return Expr(:block, Expr(:meta,:inline), :(vmul(i, $(O % I))))
# bytes = pick_integer_bytes(W, sizeof(T))
# bits = 8bytes
# jtypesym = Symbol(I <: Signed ? :Int : :UInt, bits)
# iexpr = bytes == sizeof(I) ? :i : Expr(:call, :%, :i, jtypesym)
# typ = "i$(bits)"
# vtyp = vtype(W, typ)
# rangevec = join(("$typ $(F*w+O)" for w ∈ 0:W-1), ", ")
# instrs = """
# %ie = insertelement $vtyp undef, $typ %0, i32 0
# %v = shufflevector $vtyp %ie, $vtyp undef, <$W x i32> zeroinitializer
# %res = mul nsw $vtyp %v, <$rangevec>
# ret $vtyp %res
# """
# quote
# $(Expr(:meta,:inline))
# Vec($LLVMCALL(instrs, _Vec{$W,$jtypesym}, Tuple{$jtypesym}, $iexpr))
# end
# end
# @generated function vrangemul(::Val{W}, i::T, ::Val{O}, ::Val{F}) where {W,T<:FloatingTypes,O,F}
# isone(W) && return Expr(:block, Expr(:meta,:inline), :(Base.FastMath.mul_fast(i, $(T(O)))))
# typ = LLVM_TYPES[T]
# vtyp = vtype(W, typ)
# rangevec = join(("$typ $(F*w+O).0" for w ∈ 0:W-1), ", ")
# instrs = """
# %ie = insertelement $vtyp undef, $typ %0, i32 0
# %v = shufflevector $vtyp %ie, $vtyp undef, <$W x i32> zeroinitializer
# %res = fmul fast $vtyp %v, <$rangevec>
# ret $vtyp %res
# """
# quote
# $(Expr(:meta,:inline))
# Vec($LLVMCALL(instrs, _Vec{$W,$T}, Tuple{$T}, i))
# end
# end
@inline Vec(i::MM{W,X}) where {W,X} =
vrangeincr(Val{W}(), data(i), Val{0}(), Val{X}())
@inline Vec(i::MM{W,X,StaticInt{N}}) where {W,X,N} =
vrange(Val{W}(), Int, Val{N}(), Val{X}())
@inline Vec(i::MM{1}) = data(i)
@inline Vec(i::MM{1,<:Any,StaticInt{N}}) where {N} = N
@inline vconvert(::Type{Vec{W,T}}, i::MM{W,X}) where {W,X,T} =
vrangeincr(Val{W}(), convert(T, data(i)), Val{0}(), Val{X}())
@inline vconvert(::Type{Vec{W,T}}, i::MM{W,X}) where {W,X,T<:IntegerTypesHW} =
vrangeincr(Val{W}(), data(i) % T, Val{0}(), Val{X}())
@inline vconvert(::Type{T}, i::MM{W,X}) where {W,X,T<:NativeTypes} =
vrangeincr(Val{W}(), convert(T, data(i)), Val{0}(), Val{X}())
# Addition
@inline vadd_fast(i::MM{W,X}, j::MM{W,Y}) where {W,X,Y} =
MM{W}(vadd_fast(data(i), data(j)), StaticInt{X}() + StaticInt{Y}())
@inline vadd_fast(i::MM{W}, j::AbstractSIMDVector{W}) where {W} =
vadd_fast(Vec(i), j)
@inline vadd_fast(i::AbstractSIMDVector{W}, j::MM{W}) where {W} =
vadd_fast(i, Vec(j))
@inline vadd_nsw(i::MM{W,X}, j::MM{W,Y}) where {W,X,Y} =
MM{W}(vadd_nsw(data(i), data(j)), StaticInt{X}() + StaticInt{Y}())
@inline vadd_nsw(i::MM{W}, j::AbstractSIMDVector{W}) where {W} =
vadd_nsw(Vec(i), j)
@inline vadd_nsw(i::AbstractSIMDVector{W}, j::MM{W}) where {W} =
vadd_nsw(i, Vec(j))
# @inline vadd(i::MM{W,X}, j::MM{W,Y}) where {W,X,Y} = vadd_fast(i, j)
# @inline vadd(i::MM{W}, j::AbstractSIMDVector{W}) where {W} = vadd_fast(i, j)
# @inline vadd(i::AbstractSIMDVector{W}, j::MM{W}) where {W} = vadd_fast(i, j)
# Subtraction
@inline vsub_fast(i::MM{W,X}, j::MM{W,Y}) where {W,X,Y} =
MM{W}(vsub_fast(data(i), data(j)), StaticInt{X}() - StaticInt{Y}())
@inline vsub_fast(i::MM{W}, j::AbstractSIMDVector{W}) where {W} =
vsub_fast(Vec(i), j)
@inline vsub_fast(i::AbstractSIMDVector{W}, j::MM{W}) where {W} =
vsub_fast(i, Vec(j))
@inline vsub_nsw(i::MM{W,X}, j::MM{W,Y}) where {W,X,Y} =
MM{W}(vsub_nsw(data(i), data(j)), StaticInt{X}() - StaticInt{Y}())
@inline vsub_nsw(i::MM{W}, j::AbstractSIMDVector{W}) where {W} =
vsub_nsw(Vec(i), j)
@inline vsub_nsw(i::AbstractSIMDVector{W}, j::MM{W}) where {W} =
vsub_nsw(i, Vec(j))
# Multiplication
@inline vmul_fast(i::MM{W}, j::AbstractSIMDVector{W}) where {W} =
vmul_fast(Vec(i), j)
@inline vmul_fast(i::AbstractSIMDVector{W}, j::MM{W}) where {W} =
vmul_fast(i, Vec(j))
@inline vmul_fast(i::MM{W}, j::MM{W}) where {W} = vmul_fast(Vec(i), Vec(j))
@inline vmul_fast(i::MM, j::IntegerTypesHW) = vmul_fast(Vec(i), j)
@inline vmul_fast(j::IntegerTypesHW, i::MM) = vmul_fast(j, Vec(i))
@inline vmul_nsw(i::MM{W}, j::AbstractSIMDVector{W}) where {W} =
vmul_nsw(Vec(i), j)
@inline vmul_nsw(i::AbstractSIMDVector{W}, j::MM{W}) where {W} =
vmul_nsw(i, Vec(j))
@inline vmul_nsw(i::MM{W}, j::MM{W}) where {W} = vmul_nsw(Vec(i), Vec(j))
@inline vmul_nsw(i::MM, j::IntegerTypesHW) = vmul_nsw(Vec(i), j)
@inline vmul_nsw(j::IntegerTypesHW, i::MM) = vmul_nsw(j, Vec(i))
# Division
@generated _floattype(::Union{StaticInt{R},Val{R}}) where {R} =
R ≥ 8 ? :Float64 : :Float32
@inline floattype(::Val{W}) where {W} =
_floattype(register_size() ÷ StaticInt{W}())
@inline vfloat(i::MM{W,X,I}) where {W,X,I} =
Vec(MM{W,X}(floattype(Val{W}())(getfield(i, :i) % pick_integer(Val{W}(), I))))
@inline vfdiv(i::MM, j::T) where {T<:Real} = float(i) / j
@inline vfdiv(j::T, i::MM) where {T<:Real} = j / float(i)
@inline vfdiv_fast(i::MM, j::MM) = vfdiv_fast(float(i), float(j))
@inline vfdiv_fast(i::MM, j::T) where {T<:Real} = vfdiv_fast(float(i), j)
@inline vfdiv_fast(j::T, i::MM) where {T<:Real} = vfdiv_fast(j, float(i))
@inline vfdiv(x::AbstractSIMDVector{W}, y::VectorizationBase.MM{W}) where {W} =
x / float(y)
@inline vfdiv(y::VectorizationBase.MM{W}, x::AbstractSIMDVector{W}) where {W} =
float(y) / x
@inline vfdiv_fast(
  x::AbstractSIMDVector{W},
  y::VectorizationBase.MM{W}
) where {W} = vfdiv_fast(x, float(y))
@inline vfdiv_fast(
y::VectorizationBase.MM{W},
x::AbstractSIMDVector{W}
) where {W} = vfdiv_fast(float(y), x)
@inline vfdiv(i::MM, j::VecUnroll{N,W,T,V}) where {N,W,T,V} = float(i) / j
@inline vfdiv(j::VecUnroll{N,W,T,V}, i::MM) where {N,W,T,V} = j / float(i)
@inline vfdiv(i::MM, j::MM) = float(i) / float(j)
@inline vfdiv(vu::VecUnroll, m::MM) = vu * inv(m)
@inline vfdiv(m::MM, vu::VecUnroll) = Vec(m) / vu
@inline Base.:(<<)(i::MM{W,X,T}, j::StaticInt) where {W,X,T<:IntegerTypes} =
MM{W}(getfield(i, :i) << j, StaticInt{X}() << j)
@inline Base.:(>>)(i::MM{W,X,T}, j::StaticInt) where {W,X,T<:IntegerTypes} =
MM{W}(getfield(i, :i) >> j, StaticInt{X}() >> j)
@inline Base.:(>>>)(i::MM{W,X,T}, j::StaticInt) where {W,X,T<:IntegerTypes} =
MM{W}(getfield(i, :i) >>> j, StaticInt{X}() >>> j)
@inline Base.:(<<)(i::MM{W,X,T}, j::StaticInt) where {W,X,T<:StaticInt} =
MM{W}(getfield(i, :i) << j, StaticInt{X}() << j)
@inline Base.:(>>)(i::MM{W,X,T}, j::StaticInt) where {W,X,T<:StaticInt} =
MM{W}(getfield(i, :i) >> j, StaticInt{X}() >> j)
@inline Base.:(>>>)(i::MM{W,X,T}, j::StaticInt) where {W,X,T<:StaticInt} =
MM{W}(getfield(i, :i) >>> j, StaticInt{X}() >>> j)
# for (f,op) ∈ [
# (:scalar_less, :(<)), (:scalar_greater,:(>)), (:scalar_greaterequal,:(≥)), (:scalar_lessequal,:(≤)), (:scalar_equal,:(==)), (:scalar_notequal,:(!=))
# ]
# @eval @inline $f(i::MM, j::Real) = $op(data(i), j)
# @eval @inline $f(i::Real, j::MM) = $op(i, data(j))
# @eval @inline $f(i::MM, ::StaticInt{j}) where {j} = $op(data(i), j)
# @eval @inline $f(::StaticInt{i}, j::MM) where {i} = $op(i, data(j))
# @eval @inline $f(i::MM, j::MM) = $op(data(i), data(j))
# @eval @inline $f(i, j) = $op(i, j)
# end
for f ∈ [:vshl, :vashr, :vlshr]
@eval begin
@inline $f(i::MM{W,X,T}, v::SignedHW) where {W,X,T<:SignedHW} =
$f(Vec(i), v)
@inline $f(i::MM{W,X,T}, v::SignedHW) where {W,X,T<:UnsignedHW} =
$f(Vec(i), v)
@inline $f(i::MM{W,X,T}, v::UnsignedHW) where {W,X,T<:SignedHW} =
$f(Vec(i), v)
@inline $f(i::MM{W,X,T}, v::UnsignedHW) where {W,X,T<:UnsignedHW} =
$f(Vec(i), v)
@inline $f(i::MM{W,X,T}, v::IntegerTypesHW) where {W,X,T<:StaticInt} =
$f(Vec(i), v)
@inline $f(v::SignedHW, i::MM{W,X,T}) where {W,X,T<:SignedHW} =
$f(v, Vec(i))
@inline $f(v::UnsignedHW, i::MM{W,X,T}) where {W,X,T<:SignedHW} =
$f(v, Vec(i))
@inline $f(v::SignedHW, i::MM{W,X,T}) where {W,X,T<:UnsignedHW} =
$f(v, Vec(i))
@inline $f(v::UnsignedHW, i::MM{W,X,T}) where {W,X,T<:UnsignedHW} =
$f(v, Vec(i))
@inline $f(v::IntegerTypesHW, i::MM{W,X,T}) where {W,X,T<:StaticInt} =
$f(v, Vec(i))
@inline $f(
i::MM{W,X1,T1},
j::MM{W,X2,T2}
) where {W,X1,X2,T1<:SignedHW,T2<:SignedHW} = $f(Vec(i), Vec(j))
@inline $f(
i::MM{W,X1,T1},
j::MM{W,X2,T2}
) where {W,X1,X2,T1<:UnsignedHW,T2<:SignedHW} = $f(Vec(i), Vec(j))
@inline $f(
i::MM{W,X1,T1},
j::MM{W,X2,T2}
) where {W,X1,X2,T1<:SignedHW,T2<:UnsignedHW} = $f(Vec(i), Vec(j))
@inline $f(
i::MM{W,X1,T1},
j::MM{W,X2,T2}
) where {W,X1,X2,T1<:UnsignedHW,T2<:UnsignedHW} = $f(Vec(i), Vec(j))
@inline $f(
i::MM{W,X1,T1},
j::MM{W,X2,T2}
) where {W,X1,X2,T1<:StaticInt,T2<:IntegerTypes} = $f(Vec(i), Vec(j))
@inline $f(
i::MM{W,X1,T1},
j::MM{W,X2,T2}
) where {W,X1,X2,T1<:IntegerTypes,T2<:StaticInt} = $f(Vec(i), Vec(j))
@inline $f(
i::MM{W,X1,T1},
j::MM{W,X2,T2}
) where {W,X1,X2,T1<:StaticInt,T2<:StaticInt} = $f(Vec(i), Vec(j))
@inline $f(
i::MM{W,X,T1},
v::AbstractSIMDVector{W,T2}
) where {W,X,T1<:SignedHW,T2<:SignedHW} = $f(Vec(i), v)
@inline $f(
i::MM{W,X,T1},
v::AbstractSIMDVector{W,T2}
) where {W,X,T1<:UnsignedHW,T2<:SignedHW} = $f(Vec(i), v)
@inline $f(
i::MM{W,X,T1},
v::AbstractSIMDVector{W,T2}
) where {W,X,T1<:SignedHW,T2<:UnsignedHW} = $f(Vec(i), v)
@inline $f(
i::MM{W,X,T1},
v::AbstractSIMDVector{W,T2}
) where {W,X,T1<:UnsignedHW,T2<:UnsignedHW} = $f(Vec(i), v)
@inline $f(
i::MM{W,X,T1},
v::AbstractSIMDVector{W,T2}
) where {W,X,T1<:StaticInt,T2<:IntegerTypes} = $f(Vec(i), v)
@inline $f(
v::AbstractSIMDVector{W,T1},
i::MM{W,X,T2}
) where {W,X,T1<:SignedHW,T2<:SignedHW} = $f(v, Vec(i))
@inline $f(
v::AbstractSIMDVector{W,T1},
i::MM{W,X,T2}
) where {W,X,T1<:UnsignedHW,T2<:SignedHW} = $f(v, Vec(i))
@inline $f(
v::AbstractSIMDVector{W,T1},
i::MM{W,X,T2}
) where {W,X,T1<:SignedHW,T2<:UnsignedHW} = $f(v, Vec(i))
@inline $f(
v::AbstractSIMDVector{W,T1},
i::MM{W,X,T2}
) where {W,X,T1<:UnsignedHW,T2<:UnsignedHW} = $f(v, Vec(i))
@inline $f(
v::AbstractSIMDVector{W,T1},
i::MM{W,X,T2}
) where {W,X,T1<:IntegerTypes,T2<:StaticInt} = $f(v, Vec(i))
end
end
# for f ∈ [:vand, :vor, :vxor, :vlt, :vle, :vgt, :vge, :veq, :vne, :vmin, :vmax, :vcopysign]
for f ∈ [
:vand,
:vor,
:vxor,
:veq,
:vne,
:vmin,
:vmin_fast,
:vmax,
:vmax_fast,
:vcopysign
]
@eval begin
@inline $f(i::MM{W,X,T}, v::IntegerTypes) where {W,X,T<:IntegerTypes} =
$f(Vec(i), v)
@inline $f(v::IntegerTypes, i::MM{W,X,T}) where {W,X,T<:IntegerTypes} =
$f(v, Vec(i))
@inline $f(
i::MM{W,X,T1},
v::AbstractSIMDVector{W,T2}
) where {W,X,T1<:IntegerTypes,T2<:IntegerTypes} = $f(Vec(i), v)
@inline $f(
v::AbstractSIMDVector{W,T1},
i::MM{W,X,T2}
) where {W,X,T1<:IntegerTypes,T2<:IntegerTypes} = $f(v, Vec(i))
@inline $f(
i::MM{W,X1,T1},
j::MM{W,X2,T2}
) where {W,X1,X2,T1<:IntegerTypes,T2<:IntegerTypes} = $f(Vec(i), Vec(j))
end
end
for f ∈ [:vdiv, :vrem]
@eval begin
@inline $f(
i::MM{W,X,T1},
v::AbstractSIMDVector{W,T2}
) where {W,X,T1<:SignedHW,T2<:IntegerTypes} = $f(Vec(i), v)
@inline $f(
v::AbstractSIMDVector{W,T1},
i::MM{W,X,T2}
) where {W,X,T1<:SignedHW,T2<:IntegerTypes} = $f(v, Vec(i))
@inline $f(
i::MM{W,X1,T1},
j::MM{W,X2,T2}
) where {W,X1,X2,T1<:SignedHW,T2<:IntegerTypes} = $f(Vec(i), Vec(j))
@inline $f(
i::MM{W,X,T1},
v::AbstractSIMDVector{W,T2}
) where {W,X,T1<:UnsignedHW,T2<:IntegerTypes} = $f(Vec(i), v)
@inline $f(
v::AbstractSIMDVector{W,T1},
i::MM{W,X,T2}
) where {W,X,T1<:UnsignedHW,T2<:IntegerTypes} = $f(v, Vec(i))
@inline $f(
i::MM{W,X1,T1},
j::MM{W,X2,T2}
) where {W,X1,X2,T1<:UnsignedHW,T2<:IntegerTypes} = $f(Vec(i), Vec(j))
end
end
for f ∈ [
:vlt,
:vle,
:vgt,
:vge,
:veq,
:vne,
:vmin,
:vmax,
:vmin_fast,
:vmax_fast,
:vcopysign
]
@eval begin
# left floating
@inline $f(i::MM{W,X,T}, v::IntegerTypes) where {W,X,T<:FloatingTypes} =
$f(Vec(i), v)
@inline $f(
i::MM{W,X,T1},
v::AbstractSIMDVector{W,T2}
) where {W,X,T1<:FloatingTypes,T2<:IntegerTypes} = $f(Vec(i), v)
@inline $f(
v::AbstractSIMDVector{W,T1},
i::MM{W,X,T2}
) where {W,X,T1<:FloatingTypes,T2<:IntegerTypes} = $f(v, Vec(i))
@inline $f(
i::MM{W,X1,T1},
j::MM{W,X2,T2}
) where {W,X1,X2,T1<:FloatingTypes,T2<:IntegerTypes} = $f(Vec(i), Vec(j))
# right floating
@inline $f(i::MM{W,X,T}, v::FloatingTypes) where {W,X,T<:IntegerTypes} =
$f(Vec(i), v)
@inline $f(v::IntegerTypes, i::MM{W,X,T}) where {W,X,T<:FloatingTypes} =
$f(v, Vec(i))
@inline $f(
i::MM{W,X,T1},
v::AbstractSIMDVector{W,T2}
) where {W,X,T1<:IntegerTypes,T2<:FloatingTypes} = $f(Vec(i), v)
@inline $f(
v::AbstractSIMDVector{W,T1},
i::MM{W,X,T2}
) where {W,X,T1<:IntegerTypes,T2<:FloatingTypes} = $f(v, Vec(i))
@inline $f(
i::MM{W,X1,T1},
j::MM{W,X2,T2}
) where {W,X1,X2,T1<:IntegerTypes,T2<:FloatingTypes} = $f(Vec(i), Vec(j))
# both floating
@inline $f(i::MM{W,X,T}, v::FloatingTypes) where {W,X,T<:FloatingTypes} =
$f(Vec(i), v)
@inline $f(
i::MM{W,X,T1},
v::AbstractSIMDVector{W,T2}
) where {W,X,T1<:FloatingTypes,T2<:FloatingTypes} = $f(Vec(i), v)
@inline $f(
v::AbstractSIMDVector{W,T1},
i::MM{W,X,T2}
) where {W,X,T1<:FloatingTypes,T2<:FloatingTypes} = $f(v, Vec(i))
@inline $f(
i::MM{W,X1,T1},
j::MM{W,X2,T2}
) where {W,X1,X2,T1<:FloatingTypes,T2<:FloatingTypes} = $f(Vec(i), Vec(j))
end
if f === :vcopysign
@eval begin
@inline $f(v::Float32, i::MM{W,X,T}) where {W,X,T<:IntegerTypes} =
$f(v, Vec(i))
@inline $f(v::Float32, i::MM{W,X,T}) where {W,X,T<:FloatingTypes} =
$f(v, Vec(i))
@inline $f(v::Float64, i::MM{W,X,T}) where {W,X,T<:IntegerTypes} =
$f(v, Vec(i))
@inline $f(v::Float64, i::MM{W,X,T}) where {W,X,T<:FloatingTypes} =
$f(v, Vec(i))
end
else
@eval begin
@inline $f(v::FloatingTypes, i::MM{W,X,T}) where {W,X,T<:IntegerTypes} =
$f(v, Vec(i))
@inline $f(v::FloatingTypes, i::MM{W,X,T}) where {W,X,T<:FloatingTypes} =
$f(v, Vec(i))
end
end
end
@inline vadd_fast(i::MM{W,Zero}, j::MM{W,Zero}) where {W} =
vrange(Val{W}(), Int, Val{0}(), Val{2}())
@inline vadd_nsw(i::MM{W,Zero}, j::MM{W,Zero}) where {W} =
vrange(Val{W}(), Int, Val{0}(), Val{2}())
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 5471 | #TODO: Document interface to support static size
# Define maybestaticsize, maybestaticlength, and maybestaticfirstindex
@inline maybestaticfirst(a) = static_first(a)
@inline maybestaticlast(a) = static_last(a)
@inline maybestaticlength(a) = static_length(a)
@inline maybestaticlength(a::UnitRange{T}) where {T} =
last(a) - first(a) + oneunit(T)
@inline maybestaticrange(r::Base.OneTo{T}) where {T} =
ArrayInterface.OptionallyStaticUnitRange(StaticInt{1}(), last(r))
@inline maybestaticrange(r::UnitRange) = r
@inline maybestaticrange(r) = maybestaticfirst(r):maybestaticlast(r)
@inline maybestaticsize(::NTuple{N}, ::Val{1}) where {N} = StaticInt{N}() # should we assert that i == 1?
@inline maybestaticsize(
::LinearAlgebra.Adjoint{T,V},
::Val{1}
) where {T,V<:AbstractVector{T}} = One()
@inline maybestaticsize(
::LinearAlgebra.Transpose{T,V},
::Val{1}
) where {T,V<:AbstractVector{T}} = One()
@inline maybestaticsize(A, ::Val{N}) where {N} =
ArrayInterface.static_size(A)[N]
# These have versions that may allow for more optimizations, so we override base methods with a single `StaticInt` argument.
for (f, ff) ∈ [
(:(Base.:+), :vadd_fast),
(:(Base.:-), :vsub_fast),
(:(Base.:*), :vmul_fast),
(:(Base.:+), :vadd_nsw),
(:(Base.:-), :vsub_nsw),
(:(Base.:*), :vmul_nsw),
(:(Base.:+), :vadd_nuw),
(:(Base.:-), :vsub_nuw),
(:(Base.:*), :vmul_nuw),
(:(Base.:+), :vadd_nw),
(:(Base.:-), :vsub_nw),
(:(Base.:*), :vmul_nw),
(:(Base.:<<), :vshl),
(:(Base.:÷), :vdiv),
(:(Base.:%), :vrem),
(:(Base.:>>>), :vashr)
]
@eval begin
# @inline $f(::StaticInt{M}, ::StaticInt{N}) where {M, N} = StaticInt{$f(M, N)}()
# If `M` and `N` are known at compile time, there's no need to add nsw/nuw flags.
@inline $ff(::StaticInt{M}, ::StaticInt{N}) where {M,N} =
$f(StaticInt{M}(), StaticInt{N}())
# @inline $f(::StaticInt{M}, x) where {M} = $ff(M, x)
# @inline $f(x, ::StaticInt{M}) where {M} = $ff(x, M)
@inline $ff(::StaticInt{M}, x::T) where {M,T<:IntegerTypesHW} =
$ff(M % T, x)
@inline $ff(x::T, ::StaticInt{M}) where {M,T<:IntegerTypesHW} =
$ff(x, M % T)
@inline $ff(::StaticInt{M}, x::T) where {M,T} = $ff(T(M), x)
@inline $ff(x::T, ::StaticInt{M}) where {M,T} = $ff(x, T(M))
end
end
for f ∈ [:vadd_fast, :vsub_fast, :vmul_fast]
@eval begin
@inline $f(::StaticInt{M}, n::T) where {M,T<:Number} = $f(T(M), n)
@inline $f(m::T, ::StaticInt{N}) where {N,T<:Number} = $f(m, T(N))
end
end
for f ∈ [:vsub, :vsub_fast, :vsub_nsw, :vsub_nuw, :vsub_nw]
@eval begin
@inline $f(::Zero, m::Number) = -m
@inline $f(::Zero, m::IntegerTypesHW) = -m
@inline $f(m::Number, ::Zero) = m
@inline $f(m::IntegerTypesHW, ::Zero) = m
@inline $f(::Zero, ::Zero) = Zero()
@inline $f(::Zero, ::StaticInt{N}) where {N} = -StaticInt{N}()
@inline $f(::StaticInt{N}, ::Zero) where {N} = StaticInt{N}()
end
end
for f ∈ [:vadd, :vadd_fast, :vadd_nsw, :vadd_nuw, :vadd_nw]
@eval begin
@inline $f(::StaticInt{N}, ::Zero) where {N} = StaticInt{N}()
@inline $f(::Zero, ::StaticInt{N}) where {N} = StaticInt{N}()
@inline $f(::Zero, ::Zero) = Zero()
@inline $f(a::Number, ::Zero) = a
@inline $f(a::IntegerTypesHW, ::Zero) = a
@inline $f(::Zero, a::Number) = a
@inline $f(::Zero, a::IntegerTypesHW) = a
end
end
@inline vmul_fast(::StaticInt{N}, ::Zero) where {N} = Zero()
@inline vmul_fast(::Zero, ::StaticInt{N}) where {N} = Zero()
@inline vmul_fast(::Zero, ::Zero) = Zero()
@inline vmul_fast(::StaticInt{N}, ::One) where {N} = StaticInt{N}()
@inline vmul_fast(::One, ::StaticInt{N}) where {N} = StaticInt{N}()
@inline vmul_fast(::One, ::One) = One()
@inline vmul_fast(a::Number, ::One) = a
@inline vmul_fast(a::MM, ::One) = a
@inline vmul_fast(a::IntegerTypesHW, ::One) = a
@inline vmul_fast(::One, a::Number) = a
@inline vmul_fast(::One, a::MM) = a
@inline vmul_fast(::One, a::IntegerTypesHW) = a
@inline vmul_fast(::Zero, ::One) = Zero()
@inline vmul_fast(::One, ::Zero) = Zero()
for T ∈ [:VecUnroll, :AbstractMask, :MM]
@eval begin
@inline Base.:(+)(x::$T, ::Zero) = x
@inline Base.:(+)(::Zero, x::$T) = x
@inline Base.:(-)(x::$T, ::Zero) = x
@inline Base.:(*)(x::$T, ::One) = x
@inline Base.:(*)(::One, x::$T) = x
@inline Base.:(*)(::$T, ::Zero) = Zero()
@inline Base.:(*)(::Zero, ::$T) = Zero()
end
end
@inline Base.:(+)(m::AbstractMask{W}, ::StaticInt{N}) where {N,W} =
m + vbroadcast(Val{W}(), N)
@inline Base.:(+)(::StaticInt{N}, m::AbstractMask{W}) where {N,W} =
vbroadcast(Val{W}(), N) + m
# @inline Base.:(*)(::StaticInt{N}, m::Mask{W}) where {N,W} = vbroadcast(Val{W}(), N) * m
@inline vadd_fast(x::VecUnroll, ::Zero) = x
@inline vadd_fast(::Zero, x::VecUnroll) = x
@inline vsub_fast(x::VecUnroll, ::Zero) = x
@inline vmul_fast(x::VecUnroll, ::One) = x
@inline vmul_fast(::One, x::VecUnroll) = x
@inline vmul_fast(::VecUnroll, ::Zero) = Zero()
@inline vmul_fast(::Zero, ::VecUnroll) = Zero()
for V ∈ [:AbstractSIMD, :MM]
@eval begin
@inline Base.FastMath.mul_fast(::Zero, x::$V) = Zero()
@inline Base.FastMath.mul_fast(::One, x::$V) = x
@inline Base.FastMath.mul_fast(x::$V, ::Zero) = Zero()
@inline Base.FastMath.mul_fast(x::$V, ::One) = x
@inline Base.FastMath.add_fast(::Zero, x::$V) = x
@inline Base.FastMath.add_fast(x::$V, ::Zero) = x
@inline Base.FastMath.sub_fast(::Zero, x::$V) = -x
@inline Base.FastMath.sub_fast(x::$V, ::Zero) = x
end
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 6494 |
# nextpow2(W) = vshl(one(W), vsub_fast(8sizeof(W), leading_zeros(vsub_fast(W, one(W)))))
# @inline _pick_vector(::StaticInt{W}, ::Type{T}) where {W,T} = Vec{W,T}
# @inline pick_vector(::Type{T}) where {T} = _pick_vector(pick_vector_width(T), T)
# @inline function pick_vector(::Val{N}, ::Type{T}) where {N, T}
# _pick_vector(smin(nextpow2(StaticInt{N}()), pick_vector_width(T)), T)
# end
# pick_vector(N::Int, ::Type{T}) where {T} = pick_vector(Val(N), T)
@inline MM(::Union{Val{W},StaticInt{W}}) where {W} = MM{W}(0)
@inline MM(::Union{Val{W},StaticInt{W}}, i) where {W} = MM{W}(i)
@inline MM(::Union{Val{W},StaticInt{W}}, i::AbstractSIMDVector{W}) where {W} = i
@inline MM(::StaticInt{W}, i, ::StaticInt{X}) where {W,X} = MM{W,X}(i)
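# An `MM{W,X}(i)` is a lazy arithmetic sequence: lane w holds `i + X*w`.
# Materializing one is a cheap broadcast-plus-constant (a sketch):
#   Vec(MM{4}(0))     # => Vec(0, 1, 2, 3), the default stride X being 1
#   Vec(MM{4,2}(10))  # => Vec(10, 12, 14, 16)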
@inline gep(ptr::Ptr, i::MM) = gep(ptr, data(i))
@inline Base.one(::Type{MM{W,X,I}}) where {W,X,I} = one(I)
@inline staticm1(i::MM{W,X,I}) where {W,X,I} =
MM{W,X}(vsub_fast(data(i), one(I)))
@inline staticp1(i::MM{W,X,I}) where {W,X,I} =
MM{W,X}(vadd_nsw(data(i), one(I)))
@inline vadd_fast(i::MM{W,X}, j::IntegerTypesHW) where {W,X} =
MM{W,X}(vadd_fast(data(i), j))
@inline vadd_fast(i::IntegerTypesHW, j::MM{W,X}) where {W,X} =
MM{W,X}(vadd_fast(i, data(j)))
@inline vadd_fast(i::MM{W,X}, ::StaticInt{j}) where {W,X,j} =
MM{W,X}(vadd_fast(data(i), StaticInt{j}()))
@inline vadd_fast(::StaticInt{i}, j::MM{W,X}) where {W,X,i} =
MM{W,X}(vadd_fast(StaticInt{i}(), data(j)))
@inline vadd_fast(i::MM{W,X}, ::StaticInt{0}) where {W,X} = i
@inline vadd_fast(::StaticInt{0}, j::MM{W,X}) where {W,X} = j
@inline vsub_fast(i::MM{W,X}, j::IntegerTypesHW) where {W,X} =
MM{W,X}(vsub_fast(data(i), j))
@inline vsub_fast(i::MM{W,X}, ::StaticInt{j}) where {W,X,j} =
MM{W,X}(vsub_fast(data(i), StaticInt{j}()))
@inline vsub_fast(i::MM{W,X}, ::StaticInt{0}) where {W,X} = i
@inline vadd_nsw(i::MM{W,X}, j::IntegerTypesHW) where {W,X} =
MM{W,X}(vadd_nsw(data(i), j))
@inline vadd_nsw(i::IntegerTypesHW, j::MM{W,X}) where {W,X} =
MM{W,X}(vadd_nsw(i, data(j)))
@inline vadd_nsw(i::MM{W,X}, ::StaticInt{j}) where {W,X,j} =
MM{W,X}(vadd_nsw(data(i), StaticInt{j}()))
@inline vadd_nsw(i::MM, ::Zero) = i
@inline vadd_nsw(::StaticInt{i}, j::MM{W,X}) where {W,X,i} =
MM{W,X}(vadd_nsw(StaticInt{i}(), data(j)))
@inline vadd_nsw(::Zero, j::MM{W,X}) where {W,X} = j
@inline vsub_nsw(i::MM{W,X}, j::IntegerTypesHW) where {W,X} =
MM{W,X}(vsub_nsw(data(i), j))
@inline vsub_nsw(i::MM{W,X}, ::StaticInt{j}) where {W,X,j} =
MM{W,X}(vsub_nsw(data(i), StaticInt{j}()))
@inline vsub(i::MM{W,X}, ::StaticInt{0}) where {W,X} = i
@inline vsub_fast(i::MM) = i * StaticInt{-1}()
@inline vsub_nsw(i::MM) = i * StaticInt{-1}()
@inline vsub(i::MM) = i * StaticInt{-1}()
@inline vadd(i::MM, j::IntegerTypesHW) = vadd_fast(i, j)
@inline vadd(j::IntegerTypesHW, i::MM) = vadd_fast(j, i)
@inline vsub(i::MM, j::IntegerTypesHW) = vsub_fast(i, j)
@inline vsub(j::IntegerTypesHW, i::MM) = vsub_fast(j, i)
@inline vadd(i::MM, ::StaticInt{j}) where {j} = vadd_fast(i, StaticInt{j}())
@inline vadd(::StaticInt{j}, i::MM) where {j} = vadd_fast(StaticInt{j}(), i)
@inline vsub(i::MM, ::StaticInt{j}) where {j} = vsub_fast(i, StaticInt{j}())
@inline vsub(::StaticInt{j}, i::MM) where {j} = vsub_fast(StaticInt{j}(), i)
@inline vadd(i::MM, ::Zero) = i
@inline vadd(::Zero, i::MM) = i
@inline vsub(::Zero, i::MM) = StaticInt{-1}() * i
@inline vmul_nsw(::StaticInt{M}, i::MM{W,X}) where {M,W,X} =
MM{W}(vmul_nsw(data(i), StaticInt{M}()), StaticInt{X}() * StaticInt{M}())
@inline vmul_nsw(i::MM{W,X}, ::StaticInt{M}) where {M,W,X} =
MM{W}(vmul_nsw(data(i), StaticInt{M}()), StaticInt{X}() * StaticInt{M}())
@inline vmul_fast(::StaticInt{M}, i::MM{W,X}) where {M,W,X} =
MM{W}(vmul_fast(data(i), StaticInt{M}()), StaticInt{X}() * StaticInt{M}())
@inline vmul_fast(i::MM{W,X}, ::StaticInt{M}) where {M,W,X} =
MM{W}(vmul_fast(data(i), StaticInt{M}()), StaticInt{X}() * StaticInt{M}())
@inline vmul(a, ::StaticInt{N}) where {N} = vmul_fast(a, StaticInt{N}())
@inline vmul(::StaticInt{N}, a) where {N} = vmul_fast(StaticInt{N}(), a)
@inline vmul(::StaticInt{N}, ::StaticInt{M}) where {N,M} =
StaticInt{N}() * StaticInt{M}()
@inline vsub(a, ::StaticInt{N}) where {N} = vsub_fast(a, StaticInt{N}())
@inline vadd(a, ::StaticInt{N}) where {N} = vadd_fast(a, StaticInt{N}())
@inline vsub(::StaticInt{N}, a) where {N} = vsub_fast(StaticInt{N}(), a)
@inline vadd(::StaticInt{N}, a) where {N} = vadd_fast(StaticInt{N}(), a)
@inline vsub(::StaticInt{M}, ::StaticInt{N}) where {M,N} =
StaticInt{M}() - StaticInt{N}()
@inline vadd(::StaticInt{M}, ::StaticInt{N}) where {M,N} =
StaticInt{M}() + StaticInt{N}()
@inline vrem(i::MM{W,X,I}, ::Type{I}) where {W,X,I<:IntegerTypesHW} = i
@inline vrem(i::MM{W,X}, ::Type{I}) where {W,X,I<:IntegerTypesHW} =
MM{W,X}(data(i) % I)
@inline veq(::AbstractIrrational, ::MM{W,X,<:Integer}) where {W,X} =
  zero(Mask{W})
@inline veq(x::AbstractIrrational, i::MM{W}) where {W} = x == Vec(i)
@inline veq(::MM{W,X,<:Integer}, ::AbstractIrrational) where {W,X} =
  zero(Mask{W})
@inline veq(i::MM{W}, x::AbstractIrrational) where {W} = Vec(i) == x
@inline vsub_nsw(i::MM, ::Zero) = i
@inline vsub_nsw(i::NativeTypes, j::MM{W,X}) where {W,X} =
MM(StaticInt{W}(), vsub_nsw(i, data(j)), -StaticInt{X}())
@inline vsub_fast(i::NativeTypes, j::MM{W,X}) where {W,X} =
MM(StaticInt{W}(), vsub_fast(i, data(j)), -StaticInt{X}())
@inline vsub_nsw(
i::Union{FloatingTypes,IntegerTypesHW},
j::MM{W,X}
) where {W,X} = MM(StaticInt{W}(), vsub_nsw(i, data(j)), -StaticInt{X}())
@inline vsub_fast(
i::Union{FloatingTypes,IntegerTypesHW},
j::MM{W,X}
) where {W,X} = MM(StaticInt{W}(), vsub_fast(i, data(j)), -StaticInt{X}())
@inline function Base.in(m::MM{W,X,<:Integer}, r::AbstractUnitRange) where {W,X}
vm = Vec(m)
(vm ≥ first(r)) & (vm ≤ last(r))
end
for op ∈ (:(+), :(-))
@eval begin
@inline Base.$op(vu::VecUnroll{N,1,T,T}, i::MM) where {N,T<:NativeTypes} =
VecUnroll(fmap($op, data(vu), i))
@inline Base.$op(i::MM, vu::VecUnroll{N,1,T,T}) where {N,T<:NativeTypes} =
VecUnroll(fmap($op, i, data(vu)))
@inline Base.$op(
vu::VecUnroll{N,1,T,T},
i::VecUnroll{N,W,I,MM{W,X,I}}
) where {N,W,T<:NativeTypes,I,X} = VecUnroll(fmap($op, data(vu), data(i)))
@inline Base.$op(
i::VecUnroll{N,W,I,MM{W,X,I}},
vu::VecUnroll{N,1,T,T}
) where {N,W,T<:NativeTypes,I,X} = VecUnroll(fmap($op, data(i), data(vu)))
end
end
# @inline Base.:(+)(vu::VecUnroll{N,W,T,T}, i::MM) = VecUnroll(fmap(+, data(vu), i))
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 10114 |
function binary_op(op, W, @nospecialize(T))
ty = LLVM_TYPES[T]
if isone(W)
V = T
else
ty = "<$W x $ty>"
V = NTuple{W,VecElement{T}}
end
instrs = "%res = $op $ty %0, %1\nret $ty %res"
call = :($LLVMCALL($instrs, $V, Tuple{$V,$V}, data(v1), data(v2)))
W > 1 && (call = Expr(:call, :Vec, call))
Expr(:block, Expr(:meta, :inline), call)
end
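# For example, `binary_op("add nsw", 4, Int64)` splices together
#   %res = add nsw <4 x i64> %0, %1
#   ret <4 x i64> %res
# wrapped in an inlined `llvmcall` returning a `Vec{4,Int64}` (assuming
# `LLVM_TYPES[Int64] == "i64"`).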
# Integer
for (op, f) ∈ [("add", :+), ("sub", :-), ("mul", :*), ("shl", :<<)]
ff = Symbol('v', op)
fnsw = Symbol(ff, "_nsw")
fnuw = Symbol(ff, "_nuw")
fnw = Symbol(ff, "_nw")
ff_fast = Symbol(ff, :_fast)
@eval begin
# @inline $ff(a,b) = $ff_fast(a,b)
@inline $ff(
a::T,
b::T
) where {T<:Union{FloatingTypes,IntegerTypesHW,AbstractSIMD}} =
$ff_fast(a, b)
# @generated $ff_fast(v1::Vec{W,T}, v2::Vec{W,T}) where {W,T<:IntegerTypesHW} = binary_op($op * (T <: Signed ? " nsw" : " nuw"), W, T)
@generated $ff_fast(
v1::Vec{W,T},
v2::Vec{W,T}
) where {W,T<:IntegerTypesHW} = binary_op($op, W, T)
@generated $fnsw(v1::Vec{W,T}, v2::Vec{W,T}) where {W,T<:IntegerTypesHW} =
binary_op($(op * " nsw"), W, T)
@generated $fnuw(v1::Vec{W,T}, v2::Vec{W,T}) where {W,T<:IntegerTypesHW} =
binary_op($(op * " nuw"), W, T)
@generated $fnw(v1::Vec{W,T}, v2::Vec{W,T}) where {W,T<:IntegerTypesHW} =
binary_op($(op * " nsw nuw"), W, T)
# @generated Base.$f(v1::Vec{W,T}, v2::Vec{W,T}) where {W,T<:IntegerTypesHW} = binary_op($op, W, T)
@inline Base.$f(v1::Vec{W,T}, v2::Vec{W,T}) where {W,T<:IntegerTypesHW} =
$ff_fast(v1, v2)
# @generated $ff(v1::Vec{W,T}, v2::Vec{W,T}) where {W,T<:IntegerTypesHW} = binary_op($op, W, T)
@inline $ff_fast(x, y) = $f(x, y)
# @generated $ff_fast(v1::T, v2::T) where {T<:IntegerTypesHW} = binary_op($op * (T <: Signed ? " nsw" : " nuw"), 1, T)
# @generated $ff_fast(v1::T, v2::T) where {T<:IntegerTypesHW} = binary_op($op, 1, T)
@generated $fnsw(v1::T, v2::T) where {T<:IntegerTypesHW} =
binary_op($(op * " nsw"), 1, T)
@generated $fnuw(v1::T, v2::T) where {T<:IntegerTypesHW} =
binary_op($(op * " nuw"), 1, T)
@generated $fnw(v1::T, v2::T) where {T<:IntegerTypesHW} =
binary_op($(op * " nsw nuw"), 1, T)
end
end
@inline vadd_fast(v1::T, v2::T) where {T<:IntegerTypesHW} = Base.add_int(v1, v2)
@inline vsub_fast(v1::T, v2::T) where {T<:IntegerTypesHW} = Base.sub_int(v1, v2)
@inline vmul_fast(v1::T, v2::T) where {T<:IntegerTypesHW} = Base.mul_int(v1, v2)
@inline vshl_fast(v1::T, v2::T) where {T<:IntegerTypesHW} = Base.shl_int(v1, v2)
for (op, f) ∈ [("div", :÷), ("rem", :%)]
ff = Symbol('v', op) #_ff = Symbol(:_, ff)
sbf = Symbol('s', op, :_int)
ubf = Symbol('u', op, :_int)
sbf128 = Sys.WORD_SIZE == 32 ? f : sbf
ubf128 = Sys.WORD_SIZE == 32 ? f : ubf
@eval begin
@generated $ff(v1::Vec{W,T}, v2::Vec{W,T}) where {W,T<:Integer} =
binary_op((T <: Signed ? 's' : 'u') * $op, W, T)
@inline $ff(a::I, b::I) where {I<:SignedHW} = Base.$sbf(a, b)
@inline $ff(a::I, b::I) where {I<:UnsignedHW} = Base.$ubf(a, b)
@inline $ff(a::Int128, b::Int128) = Base.$sbf128(a, b)
@inline $ff(a::UInt128, b::UInt128) = Base.$ubf128(a, b)
# @generated $_ff(v1::T, v2::T) where {T<:Integer} = binary_op((T <: Signed ? 's' : 'u') * $op, 1, T)
# @inline $ff(v1::T, v2::T) where {T<:IntegerTypesHW} = $_ff(v1, v2)
end
end
# for (op,f) ∈ [("div",:÷),("rem",:%)]
# @eval begin
# @generated Base.$f(v1::Vec{W,T}, v2::Vec{W,T}) where {W,T<:Integer} = binary_op((T <: Signed ? 's' : 'u') * $op, W, T)
# @generated Base.$f(v1::T, v2::T) where {T<:IntegerTypesHW} = binary_op((T <: Signed ? 's' : 'u') * $op, 1, T)
# end
# end
@inline vcld(x, y) = vadd(vdiv(vsub(x, one(x)), y), one(x))
@inline function vdivrem(x, y)
d = vdiv(x, y)
r = vsub(x, vmul(d, y))
d, r
end
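# These are the usual ceiling-division / divrem identities, e.g.
#   vcld(7, 3)     # => 3, matching cld(7, 3)
#   vdivrem(7, 3)  # => (2, 1), matching divrem(7, 3)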
for (op, sub) ∈ [
("ashr", :SignedHW),
("lshr", :UnsignedHW),
("lshr", :IntegerTypesHW),
("and", :IntegerTypesHW),
("or", :IntegerTypesHW),
("xor", :IntegerTypesHW)
]
ff = sub === :UnsignedHW ? :vashr : Symbol('v', op)
@eval begin
@generated $ff(v1::Vec{W,T}, v2::Vec{W,T}) where {W,T<:$sub} =
binary_op($op, W, T)
@generated $ff(v1::T, v2::T) where {T<:$sub} = binary_op($op, 1, T)
end
end
for (op, f) ∈
[("fadd", :vadd), ("fsub", :vsub), ("fmul", :vmul), ("fdiv", :vfdiv)]#,("frem",:vrem)]
ff = Symbol(f, :_fast)
fop_fast = f === :vfdiv ? "fdiv fast" : op * ' ' * fast_flags(true)
fop_contract = op * ' ' * fast_flags(false)
@eval begin
@generated $f(
v1::Vec{W,T},
v2::Vec{W,T}
) where {W,T<:Union{Float32,Float64}} = binary_op($fop_contract, W, T)
@generated $ff(
v1::Vec{W,T},
v2::Vec{W,T}
) where {W,T<:Union{Float32,Float64}} = binary_op($fop_fast, W, T)
@inline $f(v1::Vec{W,Float16}, v2::Vec{W,Float16}) where {W} =
$f(convert(Vec{W,Float32}, v1), convert(Vec{W,Float32}, v2))
@inline $ff(v1::Vec{W,Float16}, v2::Vec{W,Float16}) where {W} =
$ff(convert(Vec{W,Float32}, v1), convert(Vec{W,Float32}, v2))
end
end
@inline vsub(a::T, b::T) where {T<:Union{Float32,Float64}} =
Base.sub_float(a, b)
@inline vadd(a::T, b::T) where {T<:Union{Float32,Float64}} =
Base.add_float(a, b)
@inline vmul(a::T, b::T) where {T<:Union{Float32,Float64}} =
Base.mul_float(a, b)
@inline vsub_fast(a::T, b::T) where {T<:Union{Float32,Float64}} =
Base.sub_float_fast(a, b)
@inline vadd_fast(a::T, b::T) where {T<:Union{Float32,Float64}} =
Base.add_float_fast(a, b)
@inline vmul_fast(a::T, b::T) where {T<:Union{Float32,Float64}} =
Base.mul_float_fast(a, b)
@inline vdiv(
v1::AbstractSIMD{W,T},
v2::AbstractSIMD{W,T}
) where {W,T<:FloatingTypes} = trunc(vfdiv_fast(v1, v2))
@inline vdiv_fast(
v1::AbstractSIMD{W,T},
v2::AbstractSIMD{W,T}
) where {W,T<:FloatingTypes} = trunc(vfdiv_fast(v1, v2))
@inline vdiv_fast(v1::T, v2::T) where {T<:FloatingTypes} =
trunc(Base.FastMath.div_float_fast(v1, v2))
@inline vdiv_fast(v1::T, v2::T) where {T<:Number} = v1 ÷ v2
@inline vdiv(v1::T, v2::T) where {T<:Number} = v1 ÷ v2
@inline vdiv(v1::T, v2::T) where {T<:FloatingTypes} = vdiv_fast(v1, v2)
@inline vrem(a, b) = vfnmadd(vdiv_fast(a, b), b, a)
@inline vrem_fast(a, b) = vfnmadd(vdiv_fast(a, b), b, a)
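# A sketch of the identity used: `vdiv_fast` truncates the quotient, so
#   vrem(7.5, 2.0)  # == 7.5 - trunc(7.5 / 2.0) * 2.0 == 1.5
# which matches `rem(7.5, 2.0)`.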
# @inline vdiv_fast(v1::AbstractSIMD{W,T}, v2::AbstractSIMD{W,T}) where {W,T<:IntegerTypesHW} = trunc(T, vfloat_fast(v1) / vfloat_fast(v2))
@inline vdiv_fast(
v1::AbstractSIMD{W,T},
v2::AbstractSIMD{W,T}
) where {W,T<:IntegerTypesHW} = trunc(T, vfloat(v1) / vfloat(v2))
@inline function vdiv_fast(v1, v2)
v3, v4 = promote_div(v1, v2)
vdiv_fast(v3, v4)
end
@inline vdiv_fast(v1::VecUnroll{N,1,T,T}, s::T) where {N,T<:SignedHW} =
VecUnroll(fmap(Base.sdiv_int, data(v1), s))
@inline vdiv_fast(v1::VecUnroll{N,1,T,T}, s::T) where {N,T<:UnsignedHW} =
VecUnroll(fmap(Base.udiv_int, data(v1), s))
@inline vdiv_fast(
v1::VecUnroll{N,1,T,T},
v2::VecUnroll{N,1,T,T}
) where {N,T<:SignedHW} = VecUnroll(fmap(Base.sdiv_int, data(v1), data(v2)))
@inline vdiv_fast(
v1::VecUnroll{N,1,T,T},
v2::VecUnroll{N,1,T,T}
) where {N,T<:UnsignedHW} = VecUnroll(fmap(Base.udiv_int, data(v1), data(v2)))
@inline vfdiv(a::AbstractSIMDVector{W}, b::AbstractSIMDVector{W}) where {W} =
vfdiv(vfloat(a), vfloat(b))
# @inline vfdiv_fast(a::AbstractSIMDVector{W}, b::AbstractSIMDVector{W}) where {W} = vfdiv_fast(vfloat_fast(a), vfloat_fast(b))
@inline vfdiv_fast(
a::AbstractSIMDVector{W},
b::AbstractSIMDVector{W}
) where {W} = vfdiv_fast(vfloat(a), vfloat(b))
@inline vfdiv(a, b) = a / b
@inline vfdiv_fast(a, b) = Base.FastMath.div_fast(a, b)
for (f, op) ∈ ((:vand, :and_int), (:vor, :or_int), (:vxor, :xor_int))
@eval @inline $f(b1::Bool, b2::Bool) = Base.$op(b1, b2)
end
for f ∈ [:vadd, :vsub, :vmul]
for s ∈ [Symbol(""), :_fast, :_nsw, :_nuw, :_nw]
fs = Symbol(f, s)
@eval begin
@inline function $fs(
a::Union{FloatingTypes,IntegerTypesHW,AbstractSIMD},
b::Union{FloatingTypes,IntegerTypesHW,AbstractSIMD}
)
c, d = promote(a, b)
$fs(c, d)
end
end
end
end
# @inline vsub(a::T, b::T) where {T<:Base.BitInteger} = Base.sub_int(a, b)
for (vf, bf) ∈ [
(:vadd, :add_int),
(:vsub, :sub_int),
(:vmul, :mul_int),
(:vadd_fast, :add_int),
(:vsub_fast, :sub_int),
(:vmul_fast, :mul_int),
(:vadd_nsw, :add_int),
(:vsub_nsw, :sub_int),
(:vmul_nsw, :mul_int),
(:vadd_nuw, :add_int),
(:vsub_nuw, :sub_int),
(:vmul_nuw, :mul_int),
(:vadd_nw, :add_int),
(:vsub_nw, :sub_int),
(:vmul_nw, :mul_int)
]
@eval begin
@inline $vf(a::Int128, b::Int128) = Base.$bf(a, b)
@inline $vf(a::UInt128, b::UInt128) = Base.$bf(a, b)
end
end
# @inline vrem(a::Float32, b::Float32) = Base.rem_float_fast(a, b)
# @inline vrem(a::Float64, b::Float64) = Base.rem_float_fast(a, b)
@inline function Base.FastMath.add_fast(
a::AbstractSIMD,
b::AbstractSIMD,
c::AbstractSIMD
)
Base.FastMath.add_fast(Base.FastMath.add_fast(a, b), c)
end
@inline function Base.FastMath.add_fast(
a::T,
b::T,
c::T
) where {T<:AbstractSIMD}
Base.FastMath.add_fast(Base.FastMath.add_fast(a, b), c)
end
@inline function Base.FastMath.add_fast(
a::AbstractSIMD,
b::AbstractSIMD,
c::AbstractSIMD,
d::AbstractSIMD
)
x = Base.FastMath.add_fast(a, b)
y = Base.FastMath.add_fast(c, d)
Base.FastMath.add_fast(x, y)
end
@inline function Base.FastMath.add_fast(
a::T,
b::T,
c::T,
d::T
) where {T<:AbstractSIMD}
x = Base.FastMath.add_fast(a, b)
y = Base.FastMath.add_fast(c, d)
Base.FastMath.add_fast(x, y)
end
@inline function Base.FastMath.add_fast(
a::AbstractSIMD,
b::AbstractSIMD,
c::AbstractSIMD,
d::AbstractSIMD,
e::AbstractSIMD,
f::Vararg{Number,K}
) where {K}
x = Base.FastMath.add_fast(a, b)
y = Base.FastMath.add_fast(c, d)
Base.FastMath.add_fast(Base.FastMath.add_fast(x, y), e, f...)
end
@inline function Base.FastMath.add_fast(
a::T,
b::T,
c::T,
d::T,
e::T,
f::Vararg{T,K}
) where {T<:AbstractSIMD,K}
x = Base.FastMath.add_fast(a, b)
y = Base.FastMath.add_fast(c, d)
Base.FastMath.add_fast(Base.FastMath.add_fast(x, y), e, f...)
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 585 |
function conflictquote(W::Int = 16, bits::Int = 32)
@assert bits == 32 || bits == 64
s = bits == 32 ? 'd' : 'q'
typ = "i$(bits)"
vtyp = "<$W x $(typ)>"
op = "@llvm.x86.avx512.conflict.$s.$(bits*W)"
decl = "declare <$W x $(typ)> $op(<$W x $(typ)>)"
instrs = "%res = call <$W x $(typ)> $op(<$W x $(typ)> %0)\n ret <$W x $(typ)> %res"
T = Symbol(:UInt, bits)
llvmcall_expr(
decl,
instrs,
:(_Vec{$W,$T}),
:(Tuple{_Vec{$W,$T}}),
vtyp,
[vtyp],
[:(data(v))]
)
end
@generated vpconflict(v::Vec{W,T}) where {W,T} = conflictquote(W, 8sizeof(T))
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 14517 | function convert_func(
op::String,
@nospecialize(T1),
W1::Int,
@nospecialize(T2),
W2::Int = W1
)
typ1 = LLVM_TYPES[T1]
typ2 = LLVM_TYPES[T2]
vtyp1 = vtype(W1, typ1)
vtyp2 = vtype(W2, typ2)
instrs = """
%res = $op $vtyp2 %0 to $vtyp1
ret $vtyp1 %res
"""
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($instrs, _Vec{$W1,$T1}, Tuple{_Vec{$W2,$T2}}, data(v)))
end
end
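# For example, `convert_func("sitofp", Float64, 4, Int64)` builds
#   %res = sitofp <4 x i64> %0 to <4 x double>
#   ret <4 x double> %res
# i.e. a signed-integer-to-floating-point conversion returning `Vec{4,Float64}`.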
# For bitcasting between signed and unsigned integers (LLVM does not draw a distinction, but they're separate in Julia)
function identity_func(W, T1, T2)
vtyp1 = vtype(W, LLVM_TYPES[T1])
instrs = "ret $vtyp1 %0"
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($instrs, _Vec{$W,$T1}, Tuple{_Vec{$W,$T2}}, data(v)))
end
end
### `vconvert(::Type{<:AbstractSIMDVector}, x)` methods
### These are the critical `vconvert` methods; scalar and `VecUnroll` are implemented with respect to them.
if (Sys.ARCH === :x86_64) || (Sys.ARCH === :i686)
@generated function _vconvert(
::Type{Vec{W,F}},
v::Vec{W,T},
::True
) where {W,F<:FloatingTypes,T<:IntegerTypesHW}
convert_func(T <: Signed ? "sitofp" : "uitofp", F, W, T)
end
@inline reinterpret_half(v::AbstractSIMD{W,Int64}) where {W} =
reinterpret(Int32, v)
@inline reinterpret_half(v::AbstractSIMD{W,UInt64}) where {W} =
reinterpret(UInt32, v)
@inline function _vconvert(::Type{Vec{W,F}}, v::VecUnroll, ::True) where {W,F}
VecUnroll(fmap(_vconvert, Vec{W,F}, getfield(v, :data), True()))
end
@inline function _vconvert(
::Type{Vec{W,F}},
v::AbstractSIMD{W,UInt64},
::False
) where {W,F}
v32 = reinterpret_half(v)
vl = extractlower(v32)
vu = extractupper(v32)
x = _vconvert(Vec{W,F}, vu % UInt32, True())
vfmadd_fast(F(4.294967296e9), _vconvert(Vec{W,F}, vl, True()), x)
end
@inline function _vconvert(
::Type{Vec{W,F}},
v::AbstractSIMD{W,Int64},
::False
) where {W,F}
neg = v < 0
pos = ifelse(neg, -v, v)
posf = _vconvert(Vec{W,F}, UInt64(pos), False())
ifelse(neg, -posf, posf)
end
@inline function vconvert(
::Type{Vec{W,F}},
v::Vec{W,T}
)::Vec{W,F} where {W,F<:FloatingTypes,T<:IntegerTypesHW}
_vconvert(Vec{W,F}, v, True())::Vec{W,F}
end
@inline function vconvert(
::Type{Vec{W,F}},
v::Vec{W,T}
)::Vec{W,F} where {W,F<:FloatingTypes,T<:Union{UInt64,Int64}}
_vconvert(
Vec{W,F},
v,
has_feature(Val(:x86_64_avx512dq)) | (!has_feature(Val(:x86_64_avx2)))
)::Vec{W,F}
end
@inline function vconvert(
::Type{F},
v::VecUnroll{N,W,T,Vec{W,T}}
)::VecUnroll{N,W,F,Vec{W,F}} where {N,W,F<:FloatingTypes,T<:Union{UInt64,Int64}}
_vconvert(
Vec{W,F},
v,
has_feature(Val(:x86_64_avx512dq)) | (!has_feature(Val(:x86_64_avx2)))
)::VecUnroll{N,W,F,Vec{W,F}}
end
@inline function vconvert(
::Type{Vec{W,F}},
v::VecUnroll{N,W,T,Vec{W,T}}
)::VecUnroll{N,W,F,Vec{W,F}} where {N,W,F<:FloatingTypes,T<:Union{UInt64,Int64}}
_vconvert(
Vec{W,F},
v,
has_feature(Val(:x86_64_avx512dq)) | (!has_feature(Val(:x86_64_avx2)))
)::VecUnroll{N,W,F,Vec{W,F}}
end
@inline function vconvert(
::Type{VecUnroll{N,W,F,Vec{W,F}}},
v::VecUnroll{N,W,T,Vec{W,T}}
)::VecUnroll{N,W,F,Vec{W,F}} where {N,W,F<:FloatingTypes,T<:Union{UInt64,Int64}}
_vconvert(
Vec{W,F},
v,
has_feature(Val(:x86_64_avx512dq)) | (!has_feature(Val(:x86_64_avx2)))
)::VecUnroll{N,W,F,Vec{W,F}}
end
else
@generated function vconvert(
::Type{Vec{W,F}},
v::Vec{W,T}
)::Vec{W,F} where {W,F<:FloatingTypes,T<:IntegerTypesHW}
convert_func(T <: Signed ? "sitofp" : "uitofp", F, W, T)
end
end
@generated function vconvert(
::Type{Vec{W,T}},
v::Vec{W,F}
) where {W,F<:FloatingTypes,T<:IntegerTypesHW}
convert_func(T <: Signed ? "fptosi" : "fptoui", T, W, F)
end
@generated function vconvert(
::Type{Vec{W,T1}},
v::Vec{W,T2}
) where {W,T1<:IntegerTypesHW,T2<:IntegerTypesHW}
sz1 = sizeof(T1)::Int
sz2 = sizeof(T2)::Int
if sz1 < sz2
convert_func("trunc", T1, W, T2)
elseif sz1 == sz2
identity_func(W, T1, T2)
else
convert_func(
((T1 <: Signed) && (T2 <: Signed)) ? "sext" : "zext",
T1,
W,
T2
)
end
end
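# Sketch: narrowing truncates, same-size sign changes are identities, and
# widening sign- or zero-extends depending on the signedness of both types:
#   vconvert(Vec{4,Int32}, v::Vec{4,Int64})   # "trunc"
#   vconvert(Vec{4,UInt32}, v::Vec{4,Int32})  # same size: identity
#   vconvert(Vec{4,Int64}, v::Vec{4,Int32})   # "sext" (both Signed)
#   vconvert(Vec{4,Int64}, v::Vec{4,UInt32})  # "zext"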
@inline vconvert(::Type{Vec{W,Float16}}, v::Vec{W,Float64}) where {W} =
vconvert(Vec{W,Float16}, vconvert(Vec{W,Float32}, v))
@inline vconvert(::Type{Vec{W,Float64}}, v::Vec{W,Float16}) where {W} =
vconvert(Vec{W,Float64}, vconvert(Vec{W,Float32}, v))
@generated vconvert(::Type{Vec{W,Float16}}, v::Vec{W,Float32}) where {W} =
convert_func("fptrunc", Float16, W, Float32, W)
@generated vconvert(::Type{Vec{W,Float32}}, v::Vec{W,Float16}) where {W} =
convert_func("fpext", Float32, W, Float16, W)
@generated vconvert(::Type{Vec{W,Float32}}, v::Vec{W,Float64}) where {W} =
convert_func("fptrunc", Float32, W, Float64, W)
@generated vconvert(::Type{Vec{W,Float64}}, v::Vec{W,Float32}) where {W} =
convert_func("fpext", Float64, W, Float32, W)
@inline vconvert(::Type{<:AbstractMask{W}}, v::Vec{W,Bool}) where {W} =
tomask(v)
@inline vconvert(::Type{M}, v::Vec{W,Bool}) where {W,U,M<:AbstractMask{W,U}} =
tomask(v)
@inline vconvert(
::Type{<:VectorizationBase.AbstractMask{W,U} where {U}},
v::Vec{W,Bool}
) where {W} = VectorizationBase.tomask(v)
@inline vconvert(
::Type{<:VectorizationBase.AbstractMask{L,U} where {L,U}},
v::Vec{W,Bool}
) where {W} = VectorizationBase.tomask(v)
# @inline vconvert(::Type{Mask}, v::Vec{W,Bool}) where {W} = tomask(v)
# @generated function vconvert(::Type{<:AbstractMask{W}}, v::Vec{W,Bool}) where {W}
# instrs = String[]
# push!(instrs, "%m = trunc <$W x i8> %0 to <$W x i1>")
# zext_mask!(instrs, 'm', W, '0')
# push!(instrs, "ret i$(max(8,W)) %res.0")
# U = mask_type_symbol(W);
# quote
# $(Expr(:meta,:inline))
# Mask{$W}($LLVMCALL($(join(instrs, "\n")), $U, Tuple{_Vec{$W,Bool}}, data(v)))
# end
# end
@inline vconvert(::Type{Vec{W,Bit}}, v::Vec{W,Bool}) where {W,Bool} = tomask(v)
@inline vconvert(::Type{Vec{W,T}}, v::Vec{W,T}) where {W,T<:IntegerTypesHW} = v
@inline vconvert(::Type{Vec{W,T}}, v::Vec{W,T}) where {W,T} = v
@inline vconvert(::Type{Vec{W,T}}, s::NativeTypes) where {W,T} =
vbroadcast(Val{W}(), T(s))
@inline vconvert(::Type{Vec{W,Bool}}, s::Bool) where {W} =
vconvert(Vec{W,Bool}, vbroadcast(Val{W}(), s))
@inline vconvert(
::Type{Vec{W,T}},
s::IntegerTypesHW
) where {W,T<:IntegerTypesHW} =
_vbroadcast(StaticInt{W}(), s % T, StaticInt{W}() * static_sizeof(T))
@inline vconvert(::Type{V}, u::VecUnroll) where {V<:AbstractSIMDVector} =
VecUnroll(fmap(vconvert, V, getfield(u, :data)))
@inline vconvert(
::Type{V},
u::VecUnroll{N,W,T,V}
) where {N,W,T,V<:AbstractSIMDVector} = u
@inline vconvert(::Type{<:AbstractSIMDVector{W,T}}, i::MM{W,X}) where {W,X,T} =
vrangeincr(Val{W}(), T(data(i)), Val{0}(), Val{X}())
@inline vconvert(::Type{MM{W,X,T}}, i::MM{W,X}) where {W,X,T} =
MM{W,X}(T(getfield(i, :i)))
@inline function vconvert(
::Type{V},
v::AbstractMask{W}
) where {W,T<:Union{Base.HWReal,Bool},V<:AbstractSIMDVector{W,T}}
vifelse(v, one(T), zero(T))
end
@inline vconvert(
::Type{V},
v::AbstractMask{W}
) where {W,V<:AbstractSIMDVector{W,Bit}} = v
@inline function vconvert(
::Type{V},
v::Vec{W,Bool}
) where {W,T<:Base.HWReal,V<:AbstractSIMDVector{W,T}}
vifelse(v, one(T), zero(T))
end
### `vconvert(::Type{<:NativeTypes}, x)` methods. These forward to `vconvert(::Type{Vec{W,T}}, x)`
@inline vconvert(::Type{T}, s::T) where {T<:NativeTypes} = s
@inline vconvert(::Type{T}, s::T) where {T<:IntegerTypesHW} = s
@inline vconvert(
  ::Type{T},
  s::Union{Float16,Float32,Float64}
) where {T<:IntegerTypesHW} = Base.fptosi(T, Base.trunc_llvm(s))
@inline vconvert(
  ::Type{T},
  s::IntegerTypesHW
) where {T<:Union{Float16,Float32,Float64}} = convert(T, s)::T
@inline vconvert(
  ::Type{T},
  s::Union{Float16,Float32,Float64}
) where {T<:Union{Float16,Float32,Float64}} = convert(T, s)::T
@inline vconvert(::Type{T}, s::T) where {T<:Union{Float16,Float32,Float64}} = s
@inline vconvert(::Type{T}, s::IntegerTypesHW) where {T<:IntegerTypesHW} = s % T
@inline vconvert(::Type{T}, v::AbstractSIMD{W,T}) where {T<:NativeTypes,W} = v
@inline vconvert(::Type{T}, v::AbstractSIMD{W,S}) where {T<:NativeTypes,S,W} =
vconvert(Vec{W,T}, v)
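# Illustrative (assuming a 4-lane vector): `vconvert(Float64, v::Vec{4,Int32})`
# hits the method above and forwards to `vconvert(Vec{4,Float64}, v)`,
# converting each lane.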
### `vconvert(::Type{<:VecUnroll}, x)` methods
@inline vconvert(::Type{VecUnroll{N,W,T,V}}, s::NativeTypes) where {N,W,T,V} =
VecUnroll{N,W,T,V}(vconvert(V, s))
@inline function _vconvert(
::Type{VecUnroll{N,W,T,V}},
v::AbstractSIMDVector{W}
) where {N,W,T,V}
VecUnroll{N,W,T,V}(vconvert(V, v))
end
@inline function vconvert(
::Type{VecUnroll{N,W,T,V}},
v::VecUnroll{N}
) where {N,W,T,V}
VecUnroll(fmap(vconvert, V, getfield(v, :data)))
end
@inline vconvert(
::Type{VecUnroll{N,W,T,V}},
v::VecUnroll{N,W,T,V}
) where {N,W,T,V} = v
@generated function vconvert(
::Type{VecUnroll{N,1,T,T}},
s::NativeTypes
) where {N,T}
quote
$(Expr(:meta, :inline))
x = convert($T, s)
VecUnroll((Base.Cartesian.@ntuple $(N + 1) n -> x))
end
end
@generated function VecUnroll{N,W,T,V}(x::V) where {N,W,T,V<:Real}
q = Expr(:block, Expr(:meta, :inline))
t = Expr(:tuple)
for n ∈ 0:N
push!(t.args, :x)
end
push!(q.args, :(VecUnroll($t)))
q
end
# @inline vconvert(::Type{T}, v::T) where {T} = v
@generated function splitvectortotuple(
::StaticInt{N},
::StaticInt{W},
v::AbstractMask{L}
) where {N,W,L}
N * W == L || throw(
ArgumentError(
"Can't split a vector of length $L into $N pieces of length $W."
)
)
t = Expr(:tuple, :(Mask{$W}(u)))
s = 0
for n ∈ 2:N
push!(t.args, :(Mask{$W}(u >>> $(s += W))))
end
# This `vconvert` will dispatch to one of the following two `vconvert` methods
Expr(:block, Expr(:meta, :inline), :(u = data(v)), t)
end
@generated function splitvectortotuple(
::StaticInt{N},
::StaticInt{W},
v::AbstractSIMDVector{L}
) where {N,W,L}
N * W == L || throw(
ArgumentError(
"Can't split a vector of length $L into $N pieces of length $W."
)
)
t = Expr(:tuple)
j = 0
for i ∈ 1:N
val = Expr(:tuple)
for w ∈ 1:W
push!(val.args, j)
j += 1
end
push!(t.args, :(shufflevector(v, Val{$val}())))
end
Expr(:block, Expr(:meta, :inline), t)
end
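# Illustrative: `splitvectortotuple(StaticInt(2), StaticInt(4), v::Vec{8,T})`
# generates two shuffles, returning (lanes 0:3 of `v`, lanes 4:7 of `v`) as a
# tuple of `Vec{4,T}`s.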
@generated function splitvectortotuple(
::StaticInt{N},
::StaticInt{W},
v::LazyMulAdd{M,O}
) where {N,W,M,O}
# LazyMulAdd{M,O}(splitvectortotuple(StaticInt{N}(), StaticInt{W}(), getfield(v, :data)))
t = Expr(:tuple)
for n ∈ 1:N
push!(t.args, :(LazyMulAdd{$M,$O}(splitdata[$n])))
end
Expr(
:block,
Expr(:meta, :inline),
:(
splitdata = splitvectortotuple(
StaticInt{$N}(),
StaticInt{$W}(),
getfield(v, :data)
)
),
t
)
end
@generated function vconvert(
::Type{VecUnroll{N,W,T,V}},
v::AbstractSIMDVector{L}
) where {N,W,T,V,L}
if W == L # _vconvert will dispatch to one of the two above
Expr(:block, Expr(:meta, :inline), :(_vconvert(VecUnroll{$N,$W,$T,$V}, v)))
else
Expr(
:block,
Expr(:meta, :inline),
:(vconvert(
VecUnroll{$N,$W,$T,$V},
VecUnroll(
splitvectortotuple(StaticInt{$(N + 1)}(), StaticInt{$W}(), v)
)
))
)
end
end
@inline Vec{W,T}(v::Vec{W,S}) where {W,T,S} = vconvert(Vec{W,T}, v)
@inline Vec{W,T}(v::S) where {W,T,S<:NativeTypes} = vconvert(Vec{W,T}, v)
@inline vsigned(v::AbstractSIMD{W,T}) where {W,T<:Base.BitInteger} =
v % signed(T)
@inline vunsigned(v::AbstractSIMD{W,T}) where {W,T<:Base.BitInteger} =
v % unsigned(T)
@generated function _vfloat(
v::AbstractSIMD{W,I},
::StaticInt{RS}
) where {W,I<:Integer,RS}
ex = if 8W ≤ RS
:(vconvert(Vec{$W,Float64}, v))
else
:(vconvert(Vec{$W,Float32}, v))
end
Expr(:block, Expr(:meta, :inline), ex)
end
@inline vfloat(v::AbstractSIMD{W,I}) where {W,I<:Integer} =
_vfloat(v, register_size())
@inline vfloat(v::AbstractSIMD{W,T}) where {W,T<:Union{Float32,Float64}} = v
@inline vfloat(v::AbstractSIMD{W,Float16}) where {W} = vconvert(Float32, v)
# @inline vfloat(vu::VecUnroll) = VecUnroll(fmap(vfloat, getfield(vu, :data)))
@inline vfloat(x::Union{Float32,Float64}) = x
@inline vfloat(x::UInt64) = Base.uitofp(Float64, x)
@inline vfloat(x::Int64) = Base.sitofp(Float64, x)
@inline vfloat(x::Union{UInt8,UInt16,UInt32}) = Base.uitofp(Float32, x)
@inline vfloat(x::Union{Int8,Int16,Int32}) = Base.sitofp(Float32, x)
# @inline vfloat(v::Vec{W,I}) where {W, I <: Union{UInt64, Int64}} = Vec{W,Float64}(v)
@inline vfloat_fast(
v::AbstractSIMDVector{W,T}
) where {W,T<:Union{Float32,Float64}} = v
@inline vfloat_fast(vu::VecUnroll{W,T}) where {W,T<:Union{Float32,Float64}} = vu
@inline vfloat_fast(vu::VecUnroll) =
VecUnroll(fmap(vfloat_fast, getfield(vu, :data)))
@generated function __vfloat_fast(
v::Vec{W,I},
::StaticInt{RS}
) where {W,I<:Integer,RS}
arg = if (2W * sizeof(I) ≤ RS) || sizeof(I) ≤ 4
:v
elseif I <: Signed
:(v % Int32)
else
:(v % UInt32)
end
ex = if 8W ≤ RS
:(Vec{$W,Float64}($arg))
else
:(Vec{$W,Float32}($arg))
end
Expr(:block, Expr(:meta, :inline), ex)
end
@inline _vfloat_fast(v, ::False) = __vfloat_fast(v, register_size())
@inline _vfloat_fast(v, ::True) = vfloat(v)
@inline vfloat_fast(v::Vec) =
_vfloat_fast(v, has_feature(Val(:x86_64_avx512dq)))
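# Illustrative: with 64-byte registers (AVX512), `vfloat(::Vec{8,Int64})`
# yields a `Vec{8,Float64}` (8W == 64 ≤ RS); with 32-byte registers the same
# call yields `Vec{8,Float32}`. `vfloat_fast` may additionally narrow 64-bit
# integers to 32 bits first, unless AVX512DQ provides the exact conversion.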
@inline vreinterpret(::Type{T}, x::S) where {T,S<:NativeTypes} =
reinterpret(T, x)
@inline vreinterpret(::Type{Vec{1,T}}, x::S) where {T,S<:NativeTypes} =
reinterpret(T, x)
@inline vrem(x::NativeTypes, ::Type{T}) where {T} = x % T
@generated function vreinterpret(
::Type{T1},
v::Vec{W2,T2}
) where {W2,T1<:NativeTypes,T2}
W1 = W2 * sizeof(T2) ÷ sizeof(T1)
Expr(:block, Expr(:meta, :inline), :(vreinterpret(Vec{$W1,$T1}, v)))
end
@inline vreinterpret(
::Type{Vec{1,T1}},
v::Vec{W,T2}
) where {W,T1,T2<:Base.BitInteger} = reinterpret(T1, fuseint(v))
@generated function vreinterpret(
::Type{Vec{W1,T1}},
v::Vec{W2,T2}
) where {W1,W2,T1,T2}
@assert sizeof(T1) * W1 == W2 * sizeof(T2)
convert_func("bitcast", T1, W1, T2, W2)
end
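# Illustrative: `vreinterpret(UInt32, v::Vec{4,Float64})` computes
# W1 = 4 * 8 ÷ 4 = 8 and bitcasts the 256 bits into a `Vec{8,UInt32}`;
# the total bit width is preserved (sizeof(T1) * W1 == sizeof(T2) * W2).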
@inline vunsafe_trunc(::Type{I}, v::Vec{W,T}) where {W,I,T} =
vconvert(Vec{W,I}, v)
@inline vrem(v::AbstractSIMDVector{W,T}, ::Type{I}) where {W,I,T} =
vconvert(Vec{W,I}, v)
@inline vrem(
v::AbstractSIMDVector{W,T},
::Type{V}
) where {W,I,T,V<:AbstractSIMD{W,I}} = vconvert(V, v)
@inline vrem(r::IntegerTypesHW, ::Type{V}) where {W,I,V<:AbstractSIMD{W,I}} =
convert(V, r % I)
## file boundary: VectorizationBase.jl v0.21.70 (MIT), https://github.com/JuliaSIMD/VectorizationBase.jl.git
# This is experimental, as few arches support it, and I can't think of many uses other than floating point RNGs.
@inline __ifmalo(v1, v2, v3) =
((((v1 % UInt64)) * ((v2 % UInt64))) & 0x000fffffffffffff) + (v3 % UInt64)
@inline _ifmalo(v1, v2, v3) = __ifmalo(v1, v2, v3)
function ifmahi_quote(W)
mask = W > 1 ? llvmconst(W, "i64 4503599627370495") : "4503599627370495"
shift = W > 1 ? llvmconst(W, "i128 52") : "52"
t64 = W > 1 ? "<$W x i64>" : "i64"
t128 = W > 1 ? "<$W x i128>" : "i128"
instrs = """
%a52 = and $t64 %0, $mask
%b52 = and $t64 %1, $mask
%a128 = zext $t64 %a52 to $t128
%b128 = zext $t64 %b52 to $t128
%c128 = mul $t128 %a128, %b128
%csr = lshr $t128 %c128, $shift
%c64 = trunc $t128 %csr to $t64
%res = add $t64 %c64, %2
ret $t64 %res
"""
jt = W > 1 ? :(_Vec{$W,UInt64}) : :UInt64
call =
:($LLVMCALL($instrs, $jt, Tuple{$jt,$jt,$jt}, data(v1), data(v2), data(v3)))
W > 1 && (call = Expr(:call, :Vec, call))
Expr(:block, Expr(:meta, :inline), call)
end
@generated _ifmahi(v1::UInt64, v2::UInt64, v3::UInt64) = ifmahi_quote(1)
@generated __ifmahi(
v1::Vec{W,UInt64},
v2::Vec{W,UInt64},
v3::Vec{W,UInt64}
) where {W} = ifmahi_quote(W)
function ifmaquote(W::Int, lo::Bool)
op =
lo ? "@llvm.x86.avx512.vpmadd52l.uq.$(64W)" :
"@llvm.x86.avx512.vpmadd52h.uq.$(64W)"
decl = "declare <$W x i64> $op(<$W x i64>, <$W x i64>, <$W x i64>)"
instrs = "%res = call <$W x i64> $op(<$W x i64> %0, <$W x i64> %1, <$W x i64> %2)\n ret <$W x i64> %res"
llvmcall_expr(
decl,
instrs,
:(_Vec{$W,UInt64}),
:(Tuple{_Vec{$W,UInt64},_Vec{$W,UInt64},_Vec{$W,UInt64}}),
"<$W x i64>",
["<$W x i64>", "<$W x i64>", "<$W x i64>"],
[:(data(v3)), :(data(v1)), :(data(v2))]
)
end
@generated _ifmalo(
v1::Vec{W,UInt64},
v2::Vec{W,UInt64},
v3::Vec{W,UInt64},
::True
) where {W} = ifmaquote(W, true)
@generated _ifmahi(
v1::Vec{W,UInt64},
v2::Vec{W,UInt64},
v3::Vec{W,UInt64},
::True
) where {W} = ifmaquote(W, false)
@inline _ifmalo(
v1::Vec{W,UInt64},
v2::Vec{W,UInt64},
v3::Vec{W,UInt64},
::False
) where {W} = __ifmalo(v1, v2, v3)
@inline _ifmahi(
v1::Vec{W,UInt64},
v2::Vec{W,UInt64},
v3::Vec{W,UInt64},
::False
) where {W} = __ifmahi(v1, v2, v3)
@inline function _ifmalo(
v1::Vec{W,UInt64},
v2::Vec{W,UInt64},
v3::Vec{W,UInt64}
) where {W}
use_ifma =
(has_feature(Val(:x86_64_avx512ifma)) & _ispow2(StaticInt{W}())) & (
gt(StaticInt{W}(), StaticInt{8}()) &
le(StaticInt{W}() * StaticInt{8}(), register_size())
)
_ifmalo(v1, v2, v3, use_ifma)
end
@inline function _ifmahi(
v1::Vec{W,UInt64},
v2::Vec{W,UInt64},
v3::Vec{W,UInt64}
) where {W}
use_ifma =
(has_feature(Val(:x86_64_avx512ifma)) & _ispow2(StaticInt{W}())) & (
gt(StaticInt{W}(), StaticInt{8}()) &
le(StaticInt{W}() * StaticInt{8}(), register_size())
)
_ifmahi(v1, v2, v3, use_ifma)
end
"""
ifmalo(v1, v2, v3)
Multiply unsigned integers `v1` and `v2`, adding the lower 52 bits to `v3`.
Requires `has_feature(Val(:x86_64_avx512ifma))` to be fast.
"""
@inline ifmalo(v1, v2, v3) = _ifmalo(v1 % UInt64, v2 % UInt64, v3 % UInt64)
"""
ifmalo(v1, v2, v3)
Multiply unsigned integers `v1` and `v2`, adding the upper 52 bits to `v3`.
Requires `has_feature(Val(:x86_64_avx512ifma))` to be fast.
"""
@inline ifmahi(v1, v2, v3) =
((a, b, c) = promote(v1 % UInt64, v2 % UInt64, v3 % UInt64); _ifmahi(a, b, c))
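# Illustrative (hypothetical values): with a = 0x0004000000000000 (2^50) and
# b = 0x0000000000000008 (2^3), the 52-bit product is 2^53, so
# ifmalo(a, b, 0) == 0 (low 52 bits) and ifmahi(a, b, 0) == 2 (bits 52:103).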
@inline function _vfmadd_fast_uint64(
a::Vec{W,UInt64},
b::Vec{W,UInt64},
c::Vec{W,UInt64},
::True
) where {W}
ifmalo(a, b, c)
end
@inline function _vfmadd_fast_uint64(
a::Vec{W,UInt64},
b::Vec{W,UInt64},
c::Vec{W,UInt64},
::False
) where {W}
Base.FastMath.add_fast(Base.FastMath.mul_fast(a, b), c)
end
@inline function vfmadd_fast(
a::Vec{W,UInt64},
b::Vec{W,UInt64},
c::Vec{W,UInt64}
) where {W}
_vfmadd_fast_uint64(a, b, c, has_feature(Val(:x86_64_avx512ifma)))
end
## file boundary: VectorizationBase.jl v0.21.70 (MIT), https://github.com/JuliaSIMD/VectorizationBase.jl.git
@generated function saturated(::F, x::I, y::I) where {I<:IntegerTypesHW,F}
typ = "i$(8sizeof(I))"
s = I <: Signed ? 's' : 'u'
op = F === typeof(+) ? "add" : "sub"
f = "@llvm.$(s)$(op).sat.$typ"
decl = "declare $typ $f($typ, $typ)"
instrs = """
%res = call $typ $f($typ %0, $typ %1)
ret $typ %res
"""
llvmcall_expr(
decl,
instrs,
JULIA_TYPES[I],
:(Tuple{$I,$I}),
typ,
[typ, typ],
[:x, :y]
)
end
@generated function saturated(::F, x::Vec{W,I}, y::Vec{W,I}) where {W,I,F}
typ = "i$(8sizeof(I))"
vtyp = "<$W x $(typ)>"
s = I <: Signed ? 's' : 'u'
op = F === typeof(+) ? "add" : "sub"
f = "@llvm.$(s)$(op).sat.$(suffix(W,typ))"
decl = "declare $vtyp $f($vtyp, $vtyp)"
instrs = """
%res = call $vtyp $f($vtyp %0, $vtyp %1)
ret $vtyp %res
"""
llvmcall_expr(
decl,
instrs,
:(_Vec{$W,$I}),
:(Tuple{_Vec{$W,$I},_Vec{$W,$I}}),
vtyp,
[vtyp, vtyp],
[:(data(x)), :(data(y))]
)
end
@inline saturated_add(x, y) = saturated(+, x, y)
@inline saturated_sub(x, y) = saturated(-, x, y)
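# Illustrative: saturating ops clamp at the type bounds instead of wrapping,
# e.g. saturated_add(0xff, 0x01) === 0xff and
# saturated_sub(Int8(-128), Int8(1)) === Int8(-128).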
@eval @inline function assume(b::Bool)
$(llvmcall_expr(
"declare void @llvm.assume(i1)",
"%b = trunc i8 %0 to i1\ncall void @llvm.assume(i1 %b)\nret void",
:Cvoid,
:(Tuple{Bool}),
"void",
["i8"],
[:b]
))
end
@generated function bitreverse(i::I) where {I<:Base.BitInteger}
typ = string('i', 8sizeof(I))
f = "$typ @llvm.bitreverse.$(typ)"
decl = "declare $f($typ)"
instr = "%res = call $f($(typ) %0)\nret $(typ) %res"
llvmcall_expr(decl, instr, JULIA_TYPES[I], :(Tuple{$I}), typ, [typ], [:i])
end
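# Illustrative: `bitreverse(0x01) === 0x80`; the llvm.bitreverse intrinsic
# mirrors the bit order within the integer.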
# Doesn't work, presumably because the `noalias` doesn't propagate outside the function boundary.
# @generated function noalias!(ptr::Ptr{T}) where {T <: NativeTypes}
# Base.libllvm_version ≥ v"11" || return :ptr
# typ = LLVM_TYPES[T]
# # if Base.libllvm_version < v"10"
# # funcname = "noalias" * typ
# # decls = "define noalias $typ* @$(funcname)($typ *%a) willreturn noinline { ret $typ* %a }"
# # instrs = """
# # %ptr = inttoptr $ptyp %0 to $typ*
# # %naptr = call $typ* @$(funcname)($typ* %ptr)
# # %jptr = ptrtoint $typ* %naptr to $ptyp
# # ret $ptyp %jptr
# # """
# # else
# decls = "declare void @llvm.assume(i1)"
# instrs = """
# %ptr = inttoptr $(JULIAPOINTERTYPE) %0 to $typ*
# call void @llvm.assume(i1 true) ["noalias"($typ* %ptr)]
# %int = ptrtoint $typ* %ptr to $(JULIAPOINTERTYPE)
# ret $(JULIAPOINTERTYPE) %int
# """
# llvmcall_expr(decls, instrs, :(Ptr{$T}), :(Tuple{Ptr{$T}}), JULIAPOINTERTYPE, [JULIAPOINTERTYPE], [:ptr])
# end
# @inline noalias!(x) = x
# @eval @inline function expect(b::Bool)
# $(llvmcall_expr("declare i1 @llvm.expect.i1(i1, i1)", """
# %b = trunc i8 %0 to i1
# %actual = call i1 @llvm.expect.i1(i1 %b, i1 true)
# %byte = zext i1 %actual to i8
# ret i8 %byte""", :Bool, :(Tuple{Bool}), "i8", ["i8"], [:b]))
# end
# @generated function expect(i::I, ::Val{N}) where {I <: Union{Integer,StaticInt}, N}
# ityp = 'i' * string(8sizeof(I))
# llvmcall_expr("declare i1 @llvm.expect.$ityp($ityp, i1)", """
# %actual = call $ityp @llvm.expect.$ityp($ityp %0, $ityp $N)
# ret $ityp %actual""", I, :(Tuple{$I}), ityp, [ityp], [:i])
# end
# for (op,f) ∈ [("abs",:abs)]
# end
if Base.libllvm_version ≥ v"12"
for (op, f, S) ∈ [
("smax", :max, :Signed),
("smin", :min, :Signed),
("umax", :max, :Unsigned),
("umin", :min, :Unsigned)
]
vf = Symbol(:v, f)
@eval @generated $vf(v1::Vec{W,T}, v2::Vec{W,T}) where {W,T<:$S} =
(TS = JULIA_TYPES[T]; build_llvmcall_expr($op, W, TS, [W, W], [TS, TS]))
@eval @inline $vf(v1::Vec{W,<:$S}, v2::Vec{W,<:$S}) where {W} =
((v3, v4) = promote(v1, v2); $vf(v3, v4))
end
# TODO: clean this up.
@inline vmax(v1::Vec{W,<:Signed}, v2::Vec{W,<:Unsigned}) where {W} =
vifelse(v1 > v2, v1, v2)
@inline vmin(v1::Vec{W,<:Signed}, v2::Vec{W,<:Unsigned}) where {W} =
vifelse(v1 < v2, v1, v2)
@inline vmax(v1::Vec{W,<:Unsigned}, v2::Vec{W,<:Signed}) where {W} =
vifelse(v1 > v2, v1, v2)
@inline vmin(v1::Vec{W,<:Unsigned}, v2::Vec{W,<:Signed}) where {W} =
vifelse(v1 < v2, v1, v2)
else
@inline vmax(
v1::Vec{W,<:Union{Integer,StaticInt}},
v2::Vec{W,<:Union{Integer,StaticInt}}
) where {W} = vifelse(v1 > v2, v1, v2)
@inline vmin(
v1::Vec{W,<:Union{Integer,StaticInt}},
v2::Vec{W,<:Union{Integer,StaticInt}}
) where {W} = vifelse(v1 < v2, v1, v2)
end
@inline vmax_fast(
v1::Vec{W,<:Union{Integer,StaticInt}},
v2::Vec{W,<:Union{Integer,StaticInt}}
) where {W} = vmax(v1, v2)
@inline vmin_fast(
v1::Vec{W,<:Union{Integer,StaticInt}},
v2::Vec{W,<:Union{Integer,StaticInt}}
) where {W} = vmin(v1, v2)
@inline vmax(v1::Vec{W,Bool}, v2::Vec{W,Bool}) where {W} = vor(v1, v2)
@inline vmin(v1::Vec{W,Bool}, v2::Vec{W,Bool}) where {W} = vand(v1, v2)
# floating point
for (op, f) ∈ [
("sqrt", :vsqrt),
("fabs", :vabs),
("floor", :vfloor),
("ceil", :vceil),
("trunc", :vtrunc),
("nearbyint", :vround)#,("roundeven",:roundeven)
]
# @eval @generated Base.$f(v1::Vec{W,T}) where {W, T <: Union{Float32,Float64}} = llvmcall_expr($op, W, T, (W,), (T,), "nsz arcp contract afn reassoc")
@eval @generated $f(v1::Vec{W,T}) where {W,T<:Union{Float32,Float64}} =
(TS = T === Float32 ? :Float32 : :Float64;
build_llvmcall_expr($op, W, TS, [W], [TS], "fast"))
end
@inline vsqrt(v::AbstractSIMD{W,T}) where {W,T<:IntegerTypes} = vsqrt(float(v))
@inline vsqrt(v::FloatingTypes) = Base.sqrt_llvm_fast(v)
@inline vsqrt(v::Union{Integer,StaticInt}) = Base.sqrt_llvm_fast(float(v))
# @inline roundeven(v::VecUnroll) = VecUnroll(fmap(roundeven, getfield(v,:data)))
# @generated function Base.round(::Type{Int64}, v1::Vec{W,T}) where {W, T <: Union{Float32,Float64}}
# llvmcall_expr("lrint", W, Int64, (W,), (T,), "")
# end
# @generated function Base.round(::Type{Int32}, v1::Vec{W,T}) where {W, T <: Union{Float32,Float64}}
# llvmcall_expr("lrint", W, Int32, (W,), (T,), "")
# end
@inline vtrunc(
::Type{I},
v::VecUnroll{N,1,T,T}
) where {N,I<:IntegerTypesHW,T<:NativeTypes} =
VecUnroll(fmap(Base.unsafe_trunc, I, data(v)))
@inline vtrunc(
::Type{I},
v::AbstractSIMD{W,T}
) where {W,I<:IntegerTypesHW,T<:NativeTypes} = vconvert(I, v)
for f ∈ [:vround, :vfloor, :vceil]
@eval @inline $f(
::Type{I},
v::AbstractSIMD{W,T}
) where {W,I<:IntegerTypesHW,T<:NativeTypes} = vconvert(I, $f(v))
end
for f ∈ [:vtrunc, :vround, :vfloor, :vceil]
@eval @inline $f(v::AbstractSIMD{W,I}) where {W,I<:IntegerTypesHW} = v
end
# """
# setbits(x::Unsigned, y::Unsigned, mask::Unsigned)
# If you have AVX512, setbits of vector-arguments will select bits according to mask `m`, selecting from `y` if 0 and from `x` if `1`.
# For scalar arguments, or vector arguments without AVX512, `setbits` requires the additional restrictions on `y` that all bits for
# which `m` is 1, `y` must be 0.
# That is for scalar arguments or vector arguments without AVX512, it requires the restriction that
# ((y ⊻ m) & m) == m
# """
# @inline setbits(x, y, m) = (x & m) | y
"""
bitselect(m::Unsigned, x::Unsigned, y::Unsigned)
If you have AVX512, setbits of vector-arguments will select bits according to mask `m`, selecting from `x` if 0 and from `y` if `1`.
For scalar arguments, or vector arguments without AVX512, `setbits` requires the additional restrictions on `y` that all bits for
which `m` is 1, `y` must be 0.
That is for scalar arguments or vector arguments without AVX512, it requires the restriction that
((y ⊻ m) & m) == m
"""
@inline bitselect(m, x, y) = ((~m) & x) | (m & y)
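# Illustrative: with m = 0b1100, x = 0b1010, y = 0b0101,
# bitselect(m, x, y) == 0b0110: bits come from `x` where `m` is 0
# and from `y` where `m` is 1.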
# AVX512 lets us use 1 instruction instead of 2 dependent instructions to set bits
@generated function vpternlog(
m::Vec{W,UInt64},
x::Vec{W,UInt64},
y::Vec{W,UInt64},
::Val{L}
) where {W,L}
@assert W ∈ (2, 4, 8)
bits = 64W
decl64 = "declare <$W x i64> @llvm.x86.avx512.mask.pternlog.q.$(bits)(<$W x i64>, <$W x i64>, <$W x i64>, i32, i8)"
instr64 = """
%res = call <$W x i64> @llvm.x86.avx512.mask.pternlog.q.$(bits)(<$W x i64> %0, <$W x i64> %1, <$W x i64> %2, i32 $L, i8 -1)
ret <$W x i64> %res
"""
arg_syms = [:(data(m)), :(data(x)), :(data(y))]
llvmcall_expr(
decl64,
instr64,
:(_Vec{$W,UInt64}),
:(Tuple{_Vec{$W,UInt64},_Vec{$W,UInt64},_Vec{$W,UInt64}}),
"<$W x i64>",
["<$W x i64>", "<$W x i64>", "<$W x i64>"],
arg_syms
)
end
@generated function vpternlog(
m::Vec{W,UInt32},
x::Vec{W,UInt32},
y::Vec{W,UInt32},
::Val{L}
) where {W,L}
@assert W ∈ (4, 8, 16)
bits = 32W
decl32 = "declare <$W x i32> @llvm.x86.avx512.mask.pternlog.d.$(bits)(<$W x i32>, <$W x i32>, <$W x i32>, i32, i16)"
instr32 = """
%res = call <$W x i32> @llvm.x86.avx512.mask.pternlog.d.$(bits)(<$W x i32> %0, <$W x i32> %1, <$W x i32> %2, i32 $L, i16 -1)
ret <$W x i32> %res
"""
arg_syms = [:(data(m)), :(data(x)), :(data(y))]
llvmcall_expr(
decl32,
instr32,
:(_Vec{$W,UInt32}),
:(Tuple{_Vec{$W,UInt32},_Vec{$W,UInt32},_Vec{$W,UInt32}}),
"<$W x i32>",
["<$W x i32>", "<$W x i32>", "<$W x i32>"],
arg_syms
)
end
# @eval @generated function setbits(x::Vec{W,T}, y::Vec{W,T}, m::Vec{W,T}) where {W,T <: Union{UInt32,UInt64}}
# ex = if W*sizeof(T) ∈ (16,32,64)
# :(vpternlog(x, y, m, Val{216}()))
# else
# :((x & m) | y)
# end
# Expr(:block, Expr(:meta, :inline), ex)
# end
@inline _bitselect(
m::Vec{W,T},
x::Vec{W,T},
y::Vec{W,T},
::False
) where {W,T<:Union{UInt32,UInt64}} = ((~m) & x) | (m & y)
@inline _bitselect(
m::Vec{W,T},
x::Vec{W,T},
y::Vec{W,T},
::True
) where {W,T<:Union{UInt32,UInt64}} = vpternlog(m, x, y, Val{172}())
@inline function bitselect(
m::Vec{W,T},
x::Vec{W,T},
y::Vec{W,T}
) where {W,T<:Union{UInt32,UInt64}}
bytes = StaticInt{W}() * static_sizeof(T)
use_ternary_logic =
has_feature(Val(:x86_64_avx512f)) & (
(eq(bytes, StaticInt{16}()) | eq(bytes, StaticInt{32}())) |
eq(bytes, StaticInt{64}())
)
_bitselect(m, x, y, use_ternary_logic)
end
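# Note (illustrative): 172 == 0b10101100 is the 3-input truth table of
# (~m & x) | (m & y) with the operands ordered (m, x, y), so the AVX512 path
# computes the same bitselect in a single vpternlog instruction.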
@inline function _vcopysign(
v1::Vec{W,Float64},
v2::Vec{W,Float64},
::True
) where {W}
reinterpret(
Float64,
bitselect(
Vec{W,UInt64}(0x8000000000000000),
reinterpret(UInt64, v1),
reinterpret(UInt64, v2)
)
)
end
@inline function _vcopysign(
v1::Vec{W,Float32},
v2::Vec{W,Float32},
::True
) where {W}
reinterpret(
Float32,
bitselect(
Vec{W,UInt32}(0x80000000),
reinterpret(UInt32, v1),
reinterpret(UInt32, v2)
)
)
end
@inline function _vcopysign(
v1::Vec{W,Float64},
v2::Vec{W,Float64},
::False
) where {W}
llvm_copysign(v1, v2)
end
@inline function _vcopysign(
v1::Vec{W,Float32},
v2::Vec{W,Float32},
::False
) where {W}
llvm_copysign(v1, v2)
end
@inline function vcopysign(
v1::Vec{W,T},
v2::Vec{W,T}
) where {W,T<:Union{Float32,Float64}}
_vcopysign(v1, v2, has_feature(Val(:x86_64_avx512f)))
end
for (op, f, fast) ∈ [
("minnum", :vmin, false),
("minnum", :vmin_fast, true),
("maxnum", :vmax, false),
("maxnum", :vmax_fast, true),
("copysign", :llvm_copysign, true)
]
ff = fast_flags(fast)
fast && (ff *= " nnan")
@eval @generated function $f(
v1::Vec{W,T},
v2::Vec{W,T}
) where {W,T<:Union{Float32,Float64}}
TS = T === Float32 ? :Float32 : :Float64
build_llvmcall_expr($op, W, TS, [W, W], [TS, TS], $ff)
end
end
@inline _signbit(v::Vec{W,I}) where {W,I<:Signed} = v & Vec{W,I}(typemin(I))
@inline vcopysign(v1::Vec{W,I}, v2::Vec{W,I}) where {W,I<:Signed} =
vifelse(_signbit(v1) == _signbit(v2), v1, -v1)
@inline vcopysign(x::Float32, v::Vec{W}) where {W} =
vcopysign(vbroadcast(Val{W}(), x), v)
@inline vcopysign(x::Float64, v::Vec{W}) where {W} =
vcopysign(vbroadcast(Val{W}(), x), v)
@inline vcopysign(x::Float32, v::VecUnroll{N,W,T,V}) where {N,W,T,V} =
vcopysign(vbroadcast(Val{W}(), x), v)
@inline vcopysign(x::Float64, v::VecUnroll{N,W,T,V}) where {N,W,T,V} =
vcopysign(vbroadcast(Val{W}(), x), v)
@inline vcopysign(v::Vec, u::VecUnroll) =
VecUnroll(fmap(vcopysign, v, getfield(u, :data)))
@inline vcopysign(v::Vec{W,T}, x::NativeTypes) where {W,T} =
vcopysign(v, Vec{W,T}(x))
@inline vcopysign(v1::Vec{W,T}, v2::Vec{W}) where {W,T} =
vcopysign(v1, convert(Vec{W,T}, v2))
@inline vcopysign(v1::Vec{W,T}, ::Vec{W,<:Unsigned}) where {W,T} = vabs(v1)
@inline vcopysign(s::IntegerTypesHW, v::Vec{W}) where {W} =
vcopysign(vbroadcast(Val{W}(), s), v)
@inline vcopysign(v::Vec, s::UnsignedHW) = vabs(v)
@inline vcopysign(v::VecUnroll, s::UnsignedHW) = vabs(v)
@inline vcopysign(v::VecUnroll{N,W,T}, s::NativeTypes) where {N,W,T} =
VecUnroll(fmap(vcopysign, getfield(v, :data), vbroadcast(Val{W}(), s)))
for (f, fl) ∈ [
(:vmax, :max),
(:vmax_fast, :max_fast),
(:vmin, :min),
(:vmin_fast, :min_fast)
]
@eval begin
@inline function $f(
a::Union{FloatingTypes,Vec{<:Any,<:FloatingTypes}},
b::Union{FloatingTypes,Vec{<:Any,<:FloatingTypes}}
)
c, d = promote(a, b)
$f(c, d)
end
@inline function $f(
a::Union{FloatingTypes,Vec{<:Any,<:FloatingTypes}},
b::Union{NativeTypes,Vec{<:Any,<:NativeTypes}}
)
c, d = promote(a, b)
$f(c, d)
end
@inline function $f(
a::Union{NativeTypes,Vec{<:Any,<:NativeTypes}},
b::Union{FloatingTypes,Vec{<:Any,<:FloatingTypes}}
)
c, d = promote(a, b)
$f(c, d)
end
@inline $f(v::Vec{W,<:IntegerTypesHW}, s::IntegerTypesHW) where {W} =
$f(v, vbroadcast(Val{W}(), s))
@inline $f(s::IntegerTypesHW, v::Vec{W,<:IntegerTypesHW}) where {W} =
$f(vbroadcast(Val{W}(), s), v)
@inline $f(a::FloatingTypes, b::FloatingTypes) = Base.FastMath.$fl(a, b)
@inline $f(a::Union{Integer,StaticInt}, b::Union{Integer,StaticInt}) =
Base.FastMath.$fl(a, b)
end
end
# ternary
for (op, f, fast) ∈ [
("fma", :vfma, false),
("fma", :vfma_fast, true),
("fmuladd", :vmuladd, false),
("fmuladd", :vmuladd_fast, true)
]
@eval @generated function $f(
v1::Vec{W,T},
v2::Vec{W,T},
v3::Vec{W,T}
) where {W,T<:FloatingTypes}
TS = JULIA_TYPES[T]
build_llvmcall_expr(
$op,
W,
TS,
[W, W, W],
[TS, TS, TS],
$(fast_flags(fast))
)
end
end
# @inline Base.fma(a::Vec, b::Vec, c::Vec) = vfma(a,b,c)
# @inline Base.muladd(a::Vec{W,T}, b::Vec{W,T}, c::Vec{W,T}) where {W,T<:FloatingTypes} = vmuladd(a,b,c)
# Generic fallbacks
@inline vfma(a::NativeTypes, b::NativeTypes, c::NativeTypes) = fma(a, b, c)
@inline vmuladd(a::NativeTypes, b::NativeTypes, c::NativeTypes) =
muladd(a, b, c)
@inline vfma_fast(a::NativeTypes, b::NativeTypes, c::NativeTypes) =
muladd(a, b, c)
@inline vmuladd_fast(a::Float32, b::Float32, c::Float32) =
Base.FastMath.add_float_fast(Base.FastMath.mul_float_fast(a, b), c)
@inline vmuladd_fast(a::Float64, b::Float64, c::Float64) =
Base.FastMath.add_float_fast(Base.FastMath.mul_float_fast(a, b), c)
@inline vmuladd_fast(a::NativeTypes, b::NativeTypes, c::NativeTypes) =
Base.FastMath.add_fast(Base.FastMath.mul_fast(a, b), c)
@inline vfma(a, b, c) = fma(a, b, c)
@inline vmuladd(a, b, c) = muladd(a, b, c)
@inline vfma_fast(a, b, c) = fma(a, b, c)
@inline vmuladd_fast(a, b, c) =
Base.FastMath.add_fast(Base.FastMath.mul_fast(a, b), c)
for f ∈ [:vfma, :vmuladd, :vfma_fast, :vmuladd_fast]
@eval @inline function $f(
v1::AbstractSIMD{W,T},
v2::AbstractSIMD{W,T},
v3::AbstractSIMD{W,T}
) where {W,T<:IntegerTypesHW}
vadd(vmul(v1, v2), v3)
end
end
# vfmadd -> muladd -> promotes arguments to hit definitions from VectorizationBase
# const vfmadd = FMA_FAST ? vfma : vmuladd
@inline vfmadd(a, b, c) = vmuladd(a, b, c)
# @inline _vfmadd(a, b, c, ::True) = vfma(a, b, c)
# @inline _vfmadd(a, b, c, ::False) = vmuladd(a, b, c)
# @inline vfmadd(a, b, c) = _vfmadd(a, b, c, fma_fast())
@inline vfnmadd(a, b, c) = vfmadd(-a, b, c)
@inline vfmsub(a, b, c) = vfmadd(a, b, -c)
@inline vfnmsub(a, b, c) = -vfmadd(a, b, c)
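# Illustrative semantics: vfmadd(a, b, c) = a*b + c, vfnmadd(a, b, c) = c - a*b,
# vfmsub(a, b, c) = a*b - c, and vfnmsub(a, b, c) = -(a*b) - c.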
# const vfmadd_fast = FMA_FAST ? vfma_fast : vmuladd_fast
@inline vfmadd_fast(a, b, c) = vmuladd_fast(a, b, c)
# @inline _vfmadd_fast(a, b, c, ::True) = vfma_fast(a, b, c)
# @inline _vfmadd_fast(a, b, c, ::False) = vmuladd_fast(a, b, c)
# @inline vfmadd_fast(a, b, c) = _vfmadd_fast(a, b, c, fma_fast())
# @inline vfnmadd_fast(a, b, c) = @fastmath c - a * b
@inline vfnmadd_fast(a, b, c) = vfmadd_fast(Base.FastMath.sub_fast(a), b, c)
@inline vfmsub_fast(a, b, c) = vfmadd_fast(a, b, Base.FastMath.sub_fast(c))
@inline vfnmsub_fast(a, b, c) = Base.FastMath.sub_fast(vfmadd_fast(a, b, c))
@inline vfnmadd_fast(
a::Union{Unsigned,AbstractSIMD{<:Any,<:Union{Unsigned,Bit,Bool}}},
b,
c
) = Base.FastMath.sub_fast(c, Base.FastMath.mul_fast(a, b))
@inline vfmsub_fast(
a,
b,
c::Union{Unsigned,AbstractSIMD{<:Any,<:Union{Unsigned,Bit,Bool}}}
) = Base.FastMath.sub_fast(Base.FastMath.mul_fast(a, b), c)
# floating vector, integer scalar
# @generated function Base.:(^)(v1::Vec{W,T}, v2::Int32) where {W, T <: Union{Float32,Float64}}
# llvmcall_expr("powi", W, T, (W, 1), (T, Int32), "nsz arcp contract afn reassoc")
# end
# @inline ifelse_reduce_step(f::F, a::Number, b::Number) where {F} = ifelse(f(a,b), a, b)
@inline firstelement(x::AbstractSIMDVector) = extractelement(x, 0)
@inline secondelement(x::AbstractSIMDVector) = extractelement(x, 1)
@inline firstelement(x::VecUnroll) = VecUnroll(fmap(extractelement, data(x), 0))
@inline secondelement(x::VecUnroll) =
VecUnroll(fmap(extractelement, data(x), 1))
@inline ifelse_reduce(f::F, x::Number) where {F} = x
@inline function ifelse_reduce(f::F, x::AbstractSIMD{2,T}) where {F,T}
a = firstelement(x)
b = secondelement(x)
ifelse(f(a, b), a, b)::T
end
@inline function ifelse_reduce(f::F, x::AbstractSIMD{W,T}) where {F,W,T}
a = uppervector(x)
b = lowervector(x)
ifelse_reduce(f, ifelse(f(a, b), a, b))::T
end
@inline ifelse_reduce(f::F, x::MM) where {F} = ifelse_reduce(f, Vec(x))
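# Illustrative: `ifelse_reduce(>, Vec(3, 1, 4, 1))` halves the vector, keeping
# the lanewise comparison winners ((4, 1) vs (3, 1) gives (4, 1)), then reduces
# the final pair to return 4; i.e. a horizontal maximum built from `ifelse`.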
# @inline ifelse_reduce(f::F, x::VecUnroll) where {F} = ifelse_reduce(f, ifelse_collapse(f, x))
@inline ifelse_collapse(f::F, a, x::Tuple{}) where {F} = a
@inline function ifelse_collapse(f::F, a, x::Tuple{T}) where {F,T}
b = first(x)
ifelse(f(a, b), a, b)::typeof(a)
end
@inline function ifelse_collapse(f::F, a, x::Tuple) where {F}
b = first(x)
ifelse_collapse(f, ifelse(f(a, b), a, b), Base.tail(x))::typeof(a)
end
@inline function ifelse_collapse(f::F, x::VecUnroll{N,W,T,V}) where {F,N,W,T,V}
d = data(x)
ifelse_collapse(f, first(d), Base.tail(d))::V
end
# @inline function ifelse_collapse(f::F, x::VecUnroll) where {F}
# d = data(x)
# ifelse_collapse(f, first(d), Base.tail(d))
# end
@inline ifelse_reduce_mirror(f::F, x::Number, y::Number) where {F} = x, y
@inline function ifelse_reduce_mirror(
f::F,
x::AbstractSIMD{2},
y::AbstractSIMD{2}
) where {F}
a = firstelement(x)
b = secondelement(x)
m = firstelement(y)
n = secondelement(y)
fmn = f(m, n)
ifelse(fmn, a, b), ifelse(fmn, m, n)
end
@inline function ifelse_reduce_mirror(
f::F,
x::AbstractSIMD,
y::AbstractSIMD
) where {F}
a = uppervector(x)
b = lowervector(x)
m = uppervector(y)
n = lowervector(y)
fmn = f(m, n)
ifelse_reduce_mirror(f, ifelse(fmn, a, b), ifelse(fmn, m, n))
end
@inline ifelse_reduce_mirror(f::F, x::MM, y::MM) where {F} =
ifelse_reduce_mirror(f, Vec(x), Vec(y))
function collapse_mirror_expr(N, op, final)
N += 1
t = Expr(:tuple)
m = Expr(:tuple)
s = Vector{Symbol}(undef, N)
r = Vector{Symbol}(undef, N)
cmp = Vector{Symbol}(undef, N >>> 1)
for n ∈ 1:N
s_n = s[n] = Symbol(:v_, n)
push!(t.args, s_n)
r_n = r[n] = Symbol(:r_, n)
push!(m.args, r_n)
n ≤ length(cmp) && (cmp[n] = Symbol(:cmp_, n))
end
q = quote
$(Expr(:meta, :inline))
$m = data(a)
$t = data(x)
end
_final = if final == 1
1
else
2final
end
while N > _final
for n ∈ 1:N>>>1
push!(q.args, Expr(:(=), cmp[n], Expr(:call, op, s[n], s[n+(N>>>1)])))
push!(
q.args,
Expr(:(=), s[n], Expr(:call, ifelse, cmp[n], s[n], s[n+(N>>>1)]))
)
push!(
q.args,
Expr(:(=), r[n], Expr(:call, ifelse, cmp[n], r[n], r[n+(N>>>1)]))
)
end
if isodd(N)
push!(q.args, Expr(:(=), cmp[1], Expr(:call, op, s[1], s[N])))
push!(q.args, Expr(:(=), s[1], Expr(:call, ifelse, cmp[1], s[1], s[N])))
push!(q.args, Expr(:(=), r[1], Expr(:call, ifelse, cmp[1], r[1], r[N])))
end
N >>>= 1
end
if final ≠ 1
for n ∈ final+1:N
push!(q.args, Expr(:(=), cmp[n-final], Expr(:call, op, s[n-final], s[n])))
push!(
q.args,
Expr(
:(=),
s[n-final],
Expr(:call, ifelse, cmp[n-final], s[n-final], s[n])
)
)
push!(
q.args,
Expr(
:(=),
r[n-final],
Expr(:call, ifelse, cmp[n-final], r[n-final], r[n])
)
)
end
# t = Expr(:tuple)
m = Expr(:tuple)
for n ∈ 1:final
# push!(t.args, s[n])
push!(m.args, r[n])
end
push!(q.args, :(VecUnroll($m)))
# push!(q.args, :((VecUnroll($t),VecUnroll($m))))
# push!(q.args, :(@show(VecUnroll($t),VecUnroll($m))))
end
q
end
@generated ifelse_collapse_mirror(
f::F,
a::VecUnroll{N},
x::VecUnroll{N}
) where {F,N} = collapse_mirror_expr(N, :f, 1)
# @generated ifelse_collapse_mirror(f::F, a::VecUnroll{N}, x::VecUnroll{N}, ::StaticInt{C}) where {F,N,C} = collapse_mirror_expr(N, :f, C)
@generated ifelse_collapse_mirror(
f::F,
a::VecUnroll{N},
x::VecUnroll{N},
::StaticInt{C}
) where {C,N,F} = collapse_mirror_expr(N, :f, C)
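# Illustrative: `ifelse_collapse_mirror(f, a, x)` tree-reduces the vectors of
# `x` under `f` while carrying along the matching vectors of `a`, returning
# the `a` survivors; this appears intended for argmin/argmax-style reductions
# where `x` holds the compared values and `a` the associated payload.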
# @inline ifelse_collapse_mirror(f::F, a, ::Tuple{}, x, ::Tuple{}) where {F} = a, x
# @inline function ifelse_collapse_mirror(f::F, a, c::Tuple{T}, x, z::Tuple{T}) where {F,T}
# b = first(c); y = first(z)
# fxy = f(x,y)
# ifelse(fxy, a, b), ifelse(fxy, x, y)
# end
# @inline function ifelse_collapse_mirror(f::F, a, c::Tuple, x, z::Tuple) where {F}
# b = first(c); y = first(z)
# fxy = f(x,y)
# ifelse_collapse_mirror(f, ifelse(fxy, a, b), Base.tail(c), ifelse(fxy, x, y), Base.tail(z))
# end
# @inline function ifelse_collapse_mirror(f::F, x::VecUnroll, y::VecUnroll) where {F}
# dx = data(x); dy = data(y)
# ifelse_collapse_mirror(f, first(dx), Base.tail(dx), first(dy), Base.tail(dy))
# end
for (opname, f) ∈ [("fadd", :vsum), ("fmul", :vprod)]
if Base.libllvm_version < v"12"
op = "experimental.vector.reduce.v2." * opname
else
op = "vector.reduce." * opname
end
@eval @generated function $f(
v1::T,
v2::Vec{W,T}
) where {W,T<:Union{Float32,Float64}}
TS = JULIA_TYPES[T]
build_llvmcall_expr(
$op,
-1,
TS,
[1, W],
[TS, TS],
"nsz arcp contract afn reassoc"
)
end
end
@inline vsum(s::S, v::Vec{W,T}) where {W,T,S} =
Base.FastMath.add_fast(s, vsum(v))
@inline vprod(s::S, v::Vec{W,T}) where {W,T,S} =
Base.FastMath.mul_fast(s, vprod(v))
# for (op,f) ∈ [
# ("vector.reduce.fmax",:vmaximum),
# ("vector.reduce.fmin",:vminimum)
# ]
# Base.libllvm_version < v"12" && (op = "experimental." * op)
# @eval @generated function $f(v1::Vec{W,T}) where {W, T <: Union{Float32,Float64}}
# TS = JULIA_TYPES[T]
# build_llvmcall_expr($op, -1, TS, [W], [TS], "nsz arcp contract afn reassoc")
# end
# end
@inline vminimum(x) = ifelse_reduce(<, x)
@inline vmaximum(x) = ifelse_reduce(>, x)
# @inline vminimum(x, y) = ifelse_reduce(<, x)
# @inline vmaximum(x, y) = ifelse_reduce(>, (ifelse_reduce(>, x), y))
for (op, f, S) ∈ [
("vector.reduce.add", :vsum, :Integer),
("vector.reduce.mul", :vprod, :Integer),
("vector.reduce.and", :vall, :Integer),
("vector.reduce.or", :vany, :Integer),
("vector.reduce.xor", :vxorreduce, :Integer),
("vector.reduce.smax", :vmaximum, :Signed),
("vector.reduce.smin", :vminimum, :Signed),
("vector.reduce.umax", :vmaximum, :Unsigned),
("vector.reduce.umin", :vminimum, :Unsigned)
]
Base.libllvm_version < v"12" && (op = "experimental." * op)
@eval @generated function $f(v1::Vec{W,T}) where {W,T<:$S}
TS = JULIA_TYPES[T]
build_llvmcall_expr($op, -1, TS, [W], [TS])
end
end
if Sys.ARCH == :aarch64 # TODO: maybe the default definition will stop segfaulting some day?
for I ∈ (:Int64, :UInt64), (f, op) ∈ ((:vmaximum, :max), (:vminimum, :min))
@eval @inline $f(v::Vec{W,$I}) where {W} =
ArrayInterface.reduce_tup($op, Tuple(v))
end
end
@inline vsum(x::T) where {T<:NativeTypes} = x
@inline vprod(x::T) where {T<:NativeTypes} = x
@inline vsum(v::Vec{W,T}) where {W,T<:Union{Float32,Float64}} =
vsum(-zero(T), v)
@inline vprod(v::Vec{W,T}) where {W,T<:Union{Float32,Float64}} =
vprod(one(T), v)
@inline vsum(x, v::Vec{W,T}) where {W,T<:Union{Float32,Float64}} =
vsum(convert(T, x), v)
@inline vprod(x, v::Vec{W,T}) where {W,T<:Union{Float32,Float64}} =
vprod(convert(T, x), v)
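# Illustrative: `vsum(v)` seeds LLVM's fadd reduction with -0.0, so
# vsum(Vec(1.0, 2.0, 3.0, 4.0)) == 10.0; `vprod` is seeded with one(T).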
for (f, f_to, op, reduce, twoarg) ∈ [
(:reduced_add, :reduce_to_add, :+, :vsum, true),
(:reduced_prod, :reduce_to_prod, :*, :vprod, true),
(:reduced_max, :reduce_to_max, :max, :vmaximum, false),
(:reduced_min, :reduce_to_min, :min, :vminimum, false),
(:reduced_all, :reduce_to_all, :(&), :vall, false),
(:reduced_any, :reduce_to_any, :(|), :vany, false)
]
@eval begin
@inline $f_to(x::NativeTypes, y::NativeTypes) = x
@inline $f_to(x::AbstractSIMD{W}, y::AbstractSIMD{W}) where {W} = x
@generated function $f_to(
x0::AbstractSIMD{W1},
y::AbstractSIMD{W2}
) where {W1,W2}
@assert W1 ≥ W2
i = 0
q = Expr(:block, Expr(:meta, :inline))
xtp = :x0
while (W1 >>> i) ≠ W2
i += 1
xt = Symbol(:x, i)
xt0 = Symbol(xt, :_0)
xt1 = Symbol(xt, :_1)
push!(
q.args,
Expr(:(=), Expr(:tuple, xt0, xt1), Expr(:call, :splitvector, xtp)),
Expr(:(=), xt, Expr(:call, $op, xt0, xt1))
)
xtp = xt
end
push!(q.args, Symbol(:x, i))
q
end
@inline $f_to(x::AbstractSIMD, y::NativeTypes) = $reduce(x)
@inline $f(x::NativeTypes, y::NativeTypes) = $op(x, y)
@inline $f(x::AbstractSIMD{W}, y::AbstractSIMD{W}) where {W} = $op(x, y)
@inline $f(x::AbstractSIMD, y::AbstractSIMD) = $op($f_to(x, y), y)
@inline $f_to(x::VecUnroll, y::VecUnroll) =
VecUnroll(fmap($f_to, getfield(x, :data), getfield(y, :data)))
@inline $f(x::VecUnroll, y::VecUnroll) =
VecUnroll(fmap($f, getfield(x, :data), getfield(y, :data)))
end
if twoarg
# @eval @inline $f(y::T, x::AbstractSIMD{W,T}) where {W,T} = $reduce(y, x)
@eval @inline $f(x::AbstractSIMD, y::NativeTypes) = $reduce(y, x)
# @eval @inline $f(x::AbstractSIMD, y::NativeTypes) = ((y2,x2,r) = @show (y, x, $reduce(y, x)); r)
else
# @eval @inline $f(y::T, x::AbstractSIMD{W,T}) where {W,T} = $op(y, $reduce(x))
@eval @inline $f(x::AbstractSIMD, y::NativeTypes) = $op(y, $reduce(x))
end
end
@inline roundint(x::Float32) = round(Int32, x)
@inline _roundint(x, ::True) = round(Int, x)
@inline _roundint(x, ::False) = round(Int32, x)
@inline roundint(x::Float64) = _roundint(x, has_feature(Val(:x86_64_avx512dq)))
if Sys.WORD_SIZE ≥ 64
@inline roundint(v::Vec{W,Float64}) where {W} =
_roundint(v, has_feature(Val(:x86_64_avx512dq)))
@inline roundint(v::Vec{W,Float32}) where {W} = round(Int32, v)
else
@inline roundint(v::Vec{W,T}) where {W,T<:Union{Float32,Float64}} =
round(Int32, v)
end
# binary
function count_zeros_func(W, I, op, tf = 1)
typ = "i$(8sizeof(I))"
vtyp = "<$W x $typ>"
instr = "@llvm.$op.v$(W)$(typ)"
decl = "declare $vtyp $instr($vtyp, i1)"
instrs = "%res = call $vtyp $instr($vtyp %0, i1 $tf)\nret $vtyp %res"
rettypexpr = :(_Vec{$W,$I})
llvmcall_expr(
decl,
instrs,
rettypexpr,
:(Tuple{$rettypexpr}),
vtyp,
[vtyp],
[:(data(v))]
)
end
# @generated Base.abs(v::Vec{W,I}) where {W, I <: Union{Integer,StaticInt}} = count_zeros_func(W, I, "abs", 0)
@generated vleading_zeros(v::Vec{W,I}) where {W,I<:IntegerTypesHW} =
count_zeros_func(W, I, "ctlz")
@generated vtrailing_zeros(v::Vec{W,I}) where {W,I<:IntegerTypesHW} =
count_zeros_func(W, I, "cttz")
for (op, f) ∈ [("ctpop", :vcount_ones)]
@eval @generated $f(v1::Vec{W,T}) where {W,T} =
(TS = JULIA_TYPES[T]; build_llvmcall_expr($op, W, TS, [W], [TS]))
end
for (op, f) ∈ [("fshl", :funnel_shift_left), ("fshr", :funnel_shift_right)]
@eval @generated function $f(
v1::Vec{W,T},
v2::Vec{W,T},
v3::Vec{W,T}
) where {W,T}
TS = JULIA_TYPES[T]
build_llvmcall_expr($op, W, TS, [W, W, W], [TS, TS, TS])
end
end
@inline function funnel_shift_left(a::T, b::T, c::T) where {T}
_T = eltype(a)
S = 8sizeof(_T) % _T
(a << c) | (b >>> (S - c))
end
@inline function funnel_shift_right(a::T, b::T, c::T) where {T}
_T = eltype(a)
S = 8sizeof(_T) % _T
(a >>> c) | (b << (S - c))
end
@inline function funnel_shift_left(_a, _b, _c)
a, b, c = promote(_a, _b, _c)
funnel_shift_left(a, b, c)
end
@inline function funnel_shift_right(_a, _b, _c)
a, b, c = promote(_a, _b, _c)
funnel_shift_right(a, b, c)
end
@inline funnel_shift_left(a::MM, b::MM, c::MM) =
funnel_shift_left(Vec(a), Vec(b), Vec(c))
@inline funnel_shift_right(a::MM, b::MM, c::MM) =
funnel_shift_right(Vec(a), Vec(b), Vec(c))
@inline rotate_left(a::T, b::T) where {T} = funnel_shift_left(a, a, b)
@inline rotate_right(a::T, b::T) where {T} = funnel_shift_right(a, a, b)
@inline function rotate_left(_a, _b)
a, b = promote_div(_a, _b)
funnel_shift_left(a, a, b)
end
@inline function rotate_right(_a, _b)
a, b = promote_div(_a, _b)
funnel_shift_right(a, a, b)
end
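# Illustrative: for UInt8, rotate_left(0x81, 0x01) === 0x03; the top bit wraps
# around, matching funnel_shift_left(a, a, c) == (a << c) | (a >>> (8 - c)).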
@inline vfmadd231(a, b, c) = vfmadd(a, b, c)
@inline vfnmadd231(a, b, c) = vfnmadd(a, b, c)
@inline vfmsub231(a, b, c) = vfmsub(a, b, c)
@inline vfnmsub231(a, b, c) = vfnmsub(a, b, c)
@inline vfmadd231(
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T},
::False
) where {W,T<:Union{Float32,Float64}} = vfmadd(a, b, c)
@inline vfnmadd231(
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T},
::False
) where {W,T<:Union{Float32,Float64}} = vfnmadd(a, b, c)
@inline vfmsub231(
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T},
::False
) where {W,T<:Union{Float32,Float64}} = vfmsub(a, b, c)
@inline vfnmsub231(
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T},
::False
) where {W,T<:Union{Float32,Float64}} = vfnmsub(a, b, c)
@generated function vfmadd231(
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T},
::True
) where {W,T<:Union{Float32,Float64}}
typ = LLVM_TYPES[T]
suffix = T == Float32 ? "ps" : "pd"
vfmadd_str = """%res = call <$W x $(typ)> asm "vfmadd231$(suffix) \$3, \$2, \$1", "=v,0,v,v"(<$W x $(typ)> %2, <$W x $(typ)> %1, <$W x $(typ)> %0)
ret <$W x $(typ)> %res"""
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$vfmadd_str,
_Vec{$W,$T},
Tuple{_Vec{$W,$T},_Vec{$W,$T},_Vec{$W,$T}},
data(a),
data(b),
data(c)
)
)
end
end
@generated function vfnmadd231(
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T},
::True
) where {W,T<:Union{Float32,Float64}}
typ = LLVM_TYPES[T]
suffix = T == Float32 ? "ps" : "pd"
vfnmadd_str = """%res = call <$W x $(typ)> asm "vfnmadd231$(suffix) \$3, \$2, \$1", "=v,0,v,v"(<$W x $(typ)> %2, <$W x $(typ)> %1, <$W x $(typ)> %0)
ret <$W x $(typ)> %res"""
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$vfnmadd_str,
_Vec{$W,$T},
Tuple{_Vec{$W,$T},_Vec{$W,$T},_Vec{$W,$T}},
data(a),
data(b),
data(c)
)
)
end
end
@generated function vfmsub231(
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T},
::True
) where {W,T<:Union{Float32,Float64}}
typ = LLVM_TYPES[T]
suffix = T == Float32 ? "ps" : "pd"
vfmsub_str = """%res = call <$W x $(typ)> asm "vfmsub231$(suffix) \$3, \$2, \$1", "=v,0,v,v"(<$W x $(typ)> %2, <$W x $(typ)> %1, <$W x $(typ)> %0)
ret <$W x $(typ)> %res"""
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$vfmsub_str,
_Vec{$W,$T},
Tuple{_Vec{$W,$T},_Vec{$W,$T},_Vec{$W,$T}},
data(a),
data(b),
data(c)
)
)
end
end
@generated function vfnmsub231(
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T},
::True
) where {W,T<:Union{Float32,Float64}}
typ = LLVM_TYPES[T]
suffix = T == Float32 ? "ps" : "pd"
vfnmsub_str = """%res = call <$W x $(typ)> asm "vfnmsub231$(suffix) \$3, \$2, \$1", "=v,0,v,v"(<$W x $(typ)> %2, <$W x $(typ)> %1, <$W x $(typ)> %0)
ret <$W x $(typ)> %res"""
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$vfnmsub_str,
_Vec{$W,$T},
Tuple{_Vec{$W,$T},_Vec{$W,$T},_Vec{$W,$T}},
data(a),
data(b),
data(c)
)
)
end
end
@inline vifelse(::typeof(vfmadd231), m, a, b, c, ::False) =
vifelse(vfmadd, m, a, b, c)
@inline vifelse(::typeof(vfnmadd231), m, a, b, c, ::False) =
vifelse(vfnmadd, m, a, b, c)
@inline vifelse(::typeof(vfmsub231), m, a, b, c, ::False) =
vifelse(vfmsub, m, a, b, c)
@inline vifelse(::typeof(vfnmsub231), m, a, b, c, ::False) =
vifelse(vfnmsub, m, a, b, c)
@generated function vifelse(
::typeof(vfmadd231),
m::AbstractMask{W,U},
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T},
::True
) where {W,U<:Unsigned,T<:Union{Float32,Float64}}
typ = LLVM_TYPES[T]
suffix = T == Float32 ? "ps" : "pd"
vfmaddmask_str = """%res = call <$W x $(typ)> asm "vfmadd231$(suffix) \$3, \$2, \$1 {\$4}", "=v,0,v,v,^Yk"(<$W x $(typ)> %2, <$W x $(typ)> %1, <$W x $(typ)> %0, i$W %3)
ret <$W x $(typ)> %res"""
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$vfmaddmask_str,
_Vec{$W,$T},
Tuple{_Vec{$W,$T},_Vec{$W,$T},_Vec{$W,$T},$U},
data(a),
data(b),
data(c),
data(m)
)
)
end
end
@generated function vifelse(
::typeof(vfnmadd231),
m::AbstractMask{W,U},
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T},
::True
) where {W,U<:Unsigned,T<:Union{Float32,Float64}}
typ = LLVM_TYPES[T]
suffix = T == Float32 ? "ps" : "pd"
vfnmaddmask_str = """%res = call <$W x $(typ)> asm "vfnmadd231$(suffix) \$3, \$2, \$1 {\$4}", "=v,0,v,v,^Yk"(<$W x $(typ)> %2, <$W x $(typ)> %1, <$W x $(typ)> %0, i$W %3)
ret <$W x $(typ)> %res"""
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$vfnmaddmask_str,
_Vec{$W,$T},
Tuple{_Vec{$W,$T},_Vec{$W,$T},_Vec{$W,$T},$U},
data(a),
data(b),
data(c),
data(m)
)
)
end
end
@generated function vifelse(
::typeof(vfmsub231),
m::AbstractMask{W,U},
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T},
::True
) where {W,U<:Unsigned,T<:Union{Float32,Float64}}
typ = LLVM_TYPES[T]
suffix = T == Float32 ? "ps" : "pd"
vfmsubmask_str = """%res = call <$W x $(typ)> asm "vfmsub231$(suffix) \$3, \$2, \$1 {\$4}", "=v,0,v,v,^Yk"(<$W x $(typ)> %2, <$W x $(typ)> %1, <$W x $(typ)> %0, i$W %3)
ret <$W x $(typ)> %res"""
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$vfmsubmask_str,
_Vec{$W,$T},
Tuple{_Vec{$W,$T},_Vec{$W,$T},_Vec{$W,$T},$U},
data(a),
data(b),
data(c),
data(m)
)
)
end
end
@generated function vifelse(
::typeof(vfnmsub231),
m::AbstractMask{W,U},
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T},
::True
) where {W,U<:Unsigned,T<:Union{Float32,Float64}}
typ = LLVM_TYPES[T]
suffix = T == Float32 ? "ps" : "pd"
vfnmsubmask_str = """%res = call <$W x $(typ)> asm "vfnmsub231$(suffix) \$3, \$2, \$1 {\$4}", "=v,0,v,v,^Yk"(<$W x $(typ)> %2, <$W x $(typ)> %1, <$W x $(typ)> %0, i$W %3)
ret <$W x $(typ)> %res"""
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$vfnmsubmask_str,
_Vec{$W,$T},
Tuple{_Vec{$W,$T},_Vec{$W,$T},_Vec{$W,$T},$U},
data(a),
data(b),
data(c),
data(m)
)
)
end
end
@inline function use_asm_fma(::StaticInt{W}, ::Type{T}) where {W,T}
WT = StaticInt{W}() * static_sizeof(T)
(has_feature(Val(:x86_64_fma)) & _ispow2(StaticInt{W}())) &
(le(WT, register_size()) & ge(WT, StaticInt{16}()))
end
@inline function vfmadd231(
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T}
) where {W,T<:Union{Float32,Float64}}
vfmadd231(a, b, c, use_asm_fma(StaticInt{W}(), T))
end
@inline function vfnmadd231(
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T}
) where {W,T<:Union{Float32,Float64}}
vfnmadd231(a, b, c, use_asm_fma(StaticInt{W}(), T))
end
@inline function vfmsub231(
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T}
) where {W,T<:Union{Float32,Float64}}
vfmsub231(a, b, c, use_asm_fma(StaticInt{W}(), T))
end
@inline function vfnmsub231(
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T}
) where {W,T<:Union{Float32,Float64}}
vfnmsub231(a, b, c, use_asm_fma(StaticInt{W}(), T))
end
@inline function vifelse(
::typeof(vfmadd231),
m::AbstractMask{W,U},
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T}
) where {W,T<:Union{Float32,Float64},U}
vifelse(
vfmadd231,
m,
a,
b,
c,
use_asm_fma(StaticInt{W}(), T) & has_feature(Val(:x86_64_avx512bw))
)
end
@inline function vifelse(
::typeof(vfnmadd231),
m::AbstractMask{W,U},
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T}
) where {W,T<:Union{Float32,Float64},U}
vifelse(
vfnmadd231,
m,
a,
b,
c,
use_asm_fma(StaticInt{W}(), T) & has_feature(Val(:x86_64_avx512bw))
)
end
@inline function vifelse(
::typeof(vfmsub231),
m::AbstractMask{W,U},
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T}
) where {W,T<:Union{Float32,Float64},U}
vifelse(
vfmsub231,
m,
a,
b,
c,
use_asm_fma(StaticInt{W}(), T) & has_feature(Val(:x86_64_avx512bw))
)
end
@inline function vifelse(
::typeof(vfnmsub231),
m::AbstractMask{W,U},
a::Vec{W,T},
b::Vec{W,T},
c::Vec{W,T}
) where {W,T<:Union{Float32,Float64},U}
vifelse(
vfnmsub231,
m,
a,
b,
c,
use_asm_fma(StaticInt{W}(), T) & has_feature(Val(:x86_64_avx512bw))
)
end
"""
Fast approximate reciprocal.
Guaranteed accurate to at least 2^-14 ≈ 6.103515625e-
Useful for special funcion implementations.
"""
@inline inv_approx(x) = inv(x)
@inline inv_approx(v::VecUnroll) =
VecUnroll(fmap(inv_approx, getfield(v, :data)))
@inline vinv_fast(v) = vinv(v)
@inline vinv_fast(v::AbstractSIMD{<:Any,<:Union{Integer,StaticInt}}) =
vinv_fast(float(v))
@static if (Sys.ARCH === :x86_64) || (Sys.ARCH === :i686)
function inv_approx_expr(
W,
@nospecialize(T),
hasavx512f,
hasavx512vl,
hasavx,
vector::Bool = true
)
bits = 8sizeof(T) * W
pors = (vector | hasavx512f) ? 'p' : 's'
if (hasavx512f && (bits === 512)) || (hasavx512vl && (bits ∈ (128, 256)))
typ = T === Float64 ? "double" : "float"
vtyp = "<$W x $(typ)>"
dors = T === Float64 ? "d" : "s"
f = "@llvm.x86.avx512.rcp14.$(pors)$(dors).$(bits)"
decl = "declare $(vtyp) $f($(vtyp), $(vtyp), i$(max(8,W))) nounwind readnone"
instrs = "%res = call $(vtyp) $f($vtyp %0, $vtyp zeroinitializer, i$(max(8,W)) -1)\nret $(vtyp) %res"
return llvmcall_expr(
decl,
instrs,
:(_Vec{$W,$T}),
:(Tuple{_Vec{$W,$T}}),
vtyp,
[vtyp],
[:(data(v))],
true
)
elseif (hasavx && (W == 8)) && (T === Float32)
decl = "declare <8 x float> @llvm.x86.avx.rcp.$(pors)s.256(<8 x float>) nounwind readnone"
instrs = "%res = call <8 x float> @llvm.x86.avx.rcp.$(pors)s.256(<8 x float> %0)\nret <8 x float> %res"
return llvmcall_expr(
decl,
instrs,
:(_Vec{8,Float32}),
:(Tuple{_Vec{8,Float32}}),
"<8 x float>",
["<8 x float>"],
[:(data(v))],
true
)
elseif W == 4
decl = "declare <4 x float> @llvm.x86.sse.rcp.$(pors)s(<4 x float>) nounwind readnone"
instrs = "%res = call <4 x float> @llvm.x86.sse.rcp.$(pors)s(<4 x float> %0)\nret <4 x float> %res"
if T === Float32
return llvmcall_expr(
decl,
instrs,
:(_Vec{4,Float32}),
:(Tuple{_Vec{4,Float32}}),
"<4 x float>",
["<4 x float>"],
[:(data(v))]
)
else#if T === Float64
argexpr = [:(data(convert(Float32, v)))]
call = llvmcall_expr(
decl,
instrs,
:(_Vec{4,Float32}),
:(Tuple{_Vec{4,Float32}}),
"<4 x float>",
["<4 x float>"],
argexpr,
true
)
return :(convert(Float64, $call))
end
elseif (hasavx512f || (T === Float32)) && bits < 128
L = 16 ÷ sizeof(T)
inv_expr = inv_approx_expr(L, T, hasavx512f, hasavx512vl, hasavx, W > 1)
resize_expr =
W < 1 ? :(extractelement(v⁻¹, 0)) : :(vresize(Val{$W}(), v⁻¹))
return quote
v⁻¹ = let v = vresize(Val{$L}(), v)
$inv_expr
end
$resize_expr
end
else
return :(inv(v))
end
end
@generated function _inv_approx(
v::Vec{W,T},
::AVX512F,
::AVX512VL,
::AVX
) where {W,T<:Union{Float32,Float64},AVX512F,AVX512VL,AVX}
inv_approx_expr(
W,
T,
AVX512F === True,
AVX512VL === True,
AVX === True,
true
)
end
@generated function _inv_approx(
v::T,
::AVX512F,
::AVX512VL,
::AVX
) where {T<:Union{Float32,Float64},AVX512F,AVX512VL,AVX}
inv_approx_expr(
0,
T,
AVX512F === True,
AVX512VL === True,
AVX === True,
false
)
end
@inline function inv_approx(v::Vec{W,T}) where {W,T<:Union{Float32,Float64}}
_inv_approx(
v,
has_feature(Val(:x86_64_avx512f)),
has_feature(Val(:x86_64_avx512vl)),
has_feature(Val(:x86_64_avx))
)
end
@inline function inv_approx(v::T) where {T<:Union{Float32,Float64}}
_inv_approx(
v,
has_feature(Val(:x86_64_avx512f)),
has_feature(Val(:x86_64_avx512vl)),
has_feature(Val(:x86_64_avx))
)
end
"""
vinv_fast(x)
More accurate version of inv_approx, using 1 (`Float32`) or 2 (`Float64`) Newton iterations to achieve reasonable accuracy.
Requires x86 CPUs for `Float32` support, and `AVX512F` for `Float64`. Otherwise, it falls back on `vinv(x)`.
y = 1 / x
Use a Newton iteration:
yₙ₊₁ = yₙ - f(yₙ)/f′(yₙ)
f(yₙ) = 1/yₙ - x
f′(yₙ) = -1/yₙ²
yₙ₊₁ = yₙ + (1/yₙ - x) * yₙ² = yₙ + yₙ - x * yₙ² = 2yₙ - x * yₙ² = yₙ * ( 2 - x * yₙ )
yₙ₊₁ = yₙ * ( 2 - x * yₙ )
"""
@inline function vinv_fast(v::AbstractSIMD{W,Float32}) where {W}
v⁻¹ = inv_approx(v)
vmul_fast(v⁻¹, vfnmadd_fast(v, v⁻¹, 2.0f0))
end
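# Worked example (illustrative) of one Newton step: for x = 3.0 with an
# initial estimate y₀ = 0.3, y₁ = y₀ * (2 - x * y₀) = 0.3 * 1.1 = 0.33;
# the error relative to 1/3 shrinks quadratically (eₙ₊₁ = x * eₙ²).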
@inline function _vinv_fast(v, ::True)
v⁻¹₁ = inv_approx(v)
v⁻¹₂ = vmul_fast(v⁻¹₁, vfnmadd_fast(v, v⁻¹₁, 2.0))
v⁻¹₃ = vmul_fast(v⁻¹₂, vfnmadd_fast(v, v⁻¹₂, 2.0))
end
@inline _vinv_fast(v, ::False) = vinv(v)
@inline vinv_fast(v::AbstractSIMD{W,Float64}) where {W} =
_vinv_fast(v, has_feature(Val(:x86_64_avx512vl)))
@inline vfdiv_afast(a::VecUnroll{N}, b::VecUnroll{N}, ::False) where {N} =
VecUnroll(fmap(vfdiv_fast, getfield(a, :data), getfield(b, :data)))
@inline vfdiv_afast(a::VecUnroll{N}, b::VecUnroll{N}, ::True) where {N} =
VecUnroll(fmap(vfdiv_fast, getfield(a, :data), getfield(b, :data)))
@inline function vfdiv_afast(
a::VecUnroll{N,W,T,Vec{W,T}},
b::VecUnroll{N,W,T,Vec{W,T}},
::True
) where {N,W,T<:FloatingTypes}
VecUnroll(_vfdiv_afast(getfield(a, :data), getfield(b, :data)))
end
# @inline function _vfdiv_afast(a::Tuple{Vec{W,T},Vec{W,T},Vec{W,T},Vec{W,T},Vararg{Vec{W,T},K}}, b::Tuple{Vec{W,T},Vec{W,T},Vec{W,T},Vec{W,T},Vararg{Vec{W,T},K}}) where {W,K,T<:FloatingTypes}
# # c1 = vfdiv_fast(a[1], b[1])
# binv1 = _vinv_fast(b[1], True())
# c2 = vfdiv_fast(a[2], b[2])
# c3 = vfdiv_fast(a[3], b[3])
# c4 = vfdiv_fast(a[4], b[4])
# c1 = vmul_fast(a[1], binv1)
# (c1, c2, c3, c4, _vfdiv_afast(Base.tail(Base.tail(Base.tail(Base.tail(a)))), Base.tail(Base.tail(Base.tail(Base.tail(b)))))...)
# end
@inline function _vfdiv_afast(
a::Tuple{Vec{W,T},Vec{W,T},Vararg{Vec{W,T},K}},
b::Tuple{Vec{W,T},Vec{W,T},Vararg{Vec{W,T},K}}
) where {W,K,T<:FloatingTypes}
c1 = vmul_fast(a[1], _vinv_fast(b[1], True()))
c2 = vfdiv_fast(a[2], b[2])
(c1, c2, _vfdiv_afast(Base.tail(Base.tail(a)), Base.tail(Base.tail(b)))...)
end
@inline function _vfdiv_afast(
a::Tuple{Vec{W,T},Vec{W,T}},
b::Tuple{Vec{W,T},Vec{W,T}}
) where {W,T<:FloatingTypes}
c1 = vmul_fast(a[1], _vinv_fast(b[1], True()))
# c1 = vfdiv_fast(a[1], b[1])
c2 = vfdiv_fast(a[2], b[2])
(c1, c2)
end
ge_one_fma(::Val{:tigerlake}) = False()
ge_one_fma(::Val{:icelake}) = False()
ge_one_fma(::Val) = True()
@inline _vfdiv_afast(
a::Tuple{Vec{W,T}},
b::Tuple{Vec{W,T}}
) where {W,T<:FloatingTypes} =
(vfdiv_fast(getfield(a, 1, false), getfield(b, 1, false)),)
@inline _vfdiv_afast(a::Tuple{}, b::Tuple{}) = ()
@inline function vfdiv_fast(
a::VecUnroll{N,W,Float64,Vec{W,Float64}},
b::VecUnroll{N,W,Float64,Vec{W,Float64}}
) where {N,W}
vfdiv_afast(
a,
b,
has_feature(Val(:x86_64_avx512f)) & ge_one_fma(cpu_name())
)
end
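# Rationale (best-effort reading of the gates above): on AVX512 CPUs not
# excluded by `ge_one_fma` (Ice Lake and Tiger Lake are excluded), replacing
# every other division in the unrolled tuple with a Newton-refined reciprocal
# multiply lets the divider and FMA units work in parallel, improving the
# throughput of back-to-back divisions.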
# @inline vfdiv_fast(a::VecUnroll{N,W,Float32,Vec{W,Float32}},b::VecUnroll{N,W,Float32,Vec{W,Float32}}) where {N,W} = vfdiv_afast(a, b, True())
else
ge_one_fma(::Val) = True()
end
@inline function Base.mod(
x::AbstractSIMD{W,T},
y::AbstractSIMD{W,T}
) where {W,T<:FloatingTypes}
vfnmadd_fast(y, vfloor(vfdiv_fast(x, y)), x)
end
## file boundary: VectorizationBase.jl v0.21.70 (MIT), https://github.com/JuliaSIMD/VectorizationBase.jl.git
#
# We use these definitions because when we have other SIMD operations with masks
# LLVM optimizes the masks better.
function truncate_mask!(instrs, input, W, suffix, reverse_load::Bool = false)
mtyp_input = "i$(max(8,nextpow2(W)))"
mtyp_trunc = "i$(W)"
if reverse_load
bitreverse = "i$(W) @llvm.bitreverse.i$(W)"
decl = "declare $bitreverse(i$(W))"
bitrevmask = "bitrevmask.$(suffix)"
if mtyp_input == mtyp_trunc
str = """
%$(bitrevmask) = call $(bitreverse)($(mtyp_trunc) %$(input))
%mask.$(suffix) = bitcast $mtyp_input %$(bitrevmask) to <$W x i1>
"""
else
str = """
%masktrunc.$(suffix) = trunc $mtyp_input %$input to $mtyp_trunc
%$(bitrevmask) = call $(bitreverse)($(mtyp_trunc) %masktrunc.$(suffix))
%mask.$(suffix) = bitcast $mtyp_trunc %$(bitrevmask) to <$W x i1>
"""
end
else
decl = ""
if mtyp_input == mtyp_trunc
str = "%mask.$(suffix) = bitcast $mtyp_input %$input to <$W x i1>"
else
str = "%masktrunc.$(suffix) = trunc $mtyp_input %$input to $mtyp_trunc\n%mask.$(suffix) = bitcast $mtyp_trunc %masktrunc.$(suffix) to <$W x i1>"
end
end
push!(instrs, str)
decl
end
function zext_mask!(instrs, input, W, suffix, sext::Bool = false)
mtyp_input = "i$(max(8,nextpow2(W)))"
mtyp_trunc = "i$(W)"
str = if mtyp_input == mtyp_trunc
"%res.$(suffix) = bitcast <$W x i1> %$input to $mtyp_input"
else
ext = sext ? "sext" : "zext"
"%restrunc.$(suffix) = bitcast <$W x i1> %$input to $mtyp_trunc\n%res.$(suffix) = $ext $mtyp_trunc %restrunc.$(suffix) to $mtyp_input"
end
push!(instrs, str)
end
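# Illustrative IR emitted by `zext_mask!` for W = 4 (mask stored in an i8):
#   %restrunc.0 = bitcast <4 x i1> %mask.0 to i4
#   %res.0 = zext i4 %restrunc.0 to i8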
function binary_mask_op_instrs(W, op)
mtyp_input = "i$(max(8,nextpow2(W)))"
instrs = String[]
truncate_mask!(instrs, '0', W, 0)
truncate_mask!(instrs, '1', W, 1)
push!(instrs, "%combinedmask = $op <$W x i1> %mask.0, %mask.1")
zext_mask!(instrs, "combinedmask", W, 1)
push!(instrs, "ret $mtyp_input %res.1")
join(instrs, "\n")
end
function binary_mask_op(W, U, op, evl::Symbol = Symbol(""))
instrs = binary_mask_op_instrs(W, op)
mask = Expr(:curly, evl === Symbol("") ? :Mask : :EVLMask, W)
gf = GlobalRef(Core, :getfield)
gf1 = Expr(:call, gf, :m1, 1, false)
gf2 = Expr(:call, gf, :m2, 1, false)
llvmc = Expr(
:call,
GlobalRef(Base, :llvmcall),
instrs,
U,
:(Tuple{$U,$U}),
gf1,
gf2
)
call = Expr(:call, mask, llvmc)
evl === Symbol("") ||
push!(call.args, Expr(:call, evl, :($gf(m1, :evl)), :($gf(m2, :evl))))
Expr(:block, Expr(:meta, :inline), call)
end
@inline data(m::Mask) = getfield(m, :u)
@inline data(m::EVLMask) = getfield(m, :u)
@inline Base.convert(::Type{Mask{W,U}}, m::EVLMask{W,U}) where {W,U} =
Mask{W,U}(getfield(m, :u))
for (f, op, evl) ∈ [
(:vand, "and", :min),
(:vor, "or", :max),
(:vxor, "xor", Symbol("")),
(:veq, "icmp eq", Symbol("")),
(:vne, "icmp ne", Symbol(""))
]
@eval begin
@generated function $f(
m1::AbstractMask{W,U},
m2::AbstractMask{W,U}
) where {W,U}
binary_mask_op(
W,
U,
$op,
((m1 <: EVLMask) && (m2 <: EVLMask)) ? $(QuoteNode(evl)) : Symbol("")
)
end
end
end
for f ∈ [:vand, :vor, :vxor] # ignore irrelevant bits, so just bitcast to `Bool`
@eval @inline $f(a::Vec{W,Bool}, b::Vec{W,Bool}) where {W} =
vreinterpret(Bool, $f(vreinterpret(UInt8, a), vreinterpret(UInt8, b)))
end
for f ∈ [:vne, :veq] # Here we truncate.
@eval @inline $f(a::Vec{W,Bool}, b::Vec{W,Bool}) where {W} =
convert(Bool, $f(convert(Bit, a), convert(Bit, b)))
end
@generated function vconvert(
::Type{Vec{W,I}},
m::AbstractMask{W,U}
) where {W,I<:Union{IntegerTypesHW,Bool},U<:Union{UInt8,UInt16,UInt32,UInt64}}
bits = 8sizeof(I)
instrs = String[]
truncate_mask!(instrs, '0', W, 0)
push!(
instrs,
"%res = zext <$W x i1> %mask.0 to <$W x i$(bits)>\nret <$W x i$(bits)> %res"
)
gf = Expr(:call, GlobalRef(Core, :getfield), :m, 1, false)
llvmc = Expr(
:call,
GlobalRef(Base, :llvmcall),
join(instrs, "\n"),
:(_Vec{$W,$I}),
:(Tuple{$U}),
gf
)
Expr(:block, Expr(:meta, :inline), Expr(:call, :Vec, llvmc))
end
@generated function splitint(
i::S,
::Type{T}
) where {S<:Base.BitInteger,T<:Union{Bool,Base.BitInteger}}
sizeof_S = sizeof(S)
sizeof_T = sizeof(T)
if sizeof_T > sizeof_S
return :(i % T)
elseif sizeof_T == sizeof_S
return :i
end
W, r = divrem(sizeof_S, sizeof_T)
@assert iszero(r)
vtyp = "<$W x i$(8sizeof_T)>"
instrs = """
%split = bitcast i$(8sizeof_S) %0 to $vtyp
ret $vtyp %split
"""
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($instrs, _Vec{$W,$T}, Tuple{$S}, i))
end
end
@generated function fuseint(
v::Vec{W,I}
) where {W,I<:Union{Bool,Base.BitInteger}}
@assert ispow2(W)
bytes = W * sizeof(I)
bits = 8bytes
@assert bytes ≤ 16
T = (I <: Signed) ? Symbol(:Int, bits) : Symbol(:UInt, bits)
vtyp = "<$W x i$(8sizeof(I))>"
styp = "i$(bits)"
instrs = """
%fused = bitcast $vtyp %0 to $styp
ret $styp %fused
"""
quote
$(Expr(:meta, :inline))
$LLVMCALL($instrs, $T, Tuple{_Vec{$W,$I}}, data(v))
end
end
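# Illustrative: `splitint(0x0201, UInt8)` bitcasts the 16 bits into
# Vec{2,UInt8}(0x01, 0x02) (lane 0 holds the low byte on little-endian
# targets); `fuseint` is the inverse bitcast back to a scalar integer.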
function vadd_expr(W, U, instr)
instrs = String[]
truncate_mask!(instrs, '0', W, 0)
truncate_mask!(instrs, '1', W, 1)
push!(
instrs,
"""%uv.0 = zext <$W x i1> %mask.0 to <$W x i8>
%uv.1 = zext <$W x i1> %mask.1 to <$W x i8>
%res = $instr <$W x i8> %uv.0, %uv.1
ret <$W x i8> %res"""
)
Expr(
:block,
Expr(:meta, :inline),
:(Vec(
$LLVMCALL(
$(join(instrs, "\n")),
_Vec{$W,UInt8},
Tuple{$U,$U},
getfield(m1, :u),
getfield(m2, :u)
)
))
)
end
@generated vadd_fast(m1::AbstractMask{W,U}, m2::AbstractMask{W,U}) where {W,U} =
vadd_expr(W, U, "add")
@generated vsub_fast(m1::AbstractMask{W,U}, m2::AbstractMask{W,U}) where {W,U} =
vadd_expr(W, U, "sub")
@inline vadd(m1::AbstractMask{W,U}, m2::AbstractMask{W,U}) where {W,U} =
vadd_fast(m1, m2)
@inline vsub(m1::AbstractMask{W,U}, m2::AbstractMask{W,U}) where {W,U} =
vsub_fast(m1, m2)
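# Hedged sketch: mask addition zero-extends each lane to `i8` before adding, so
#
#   vadd(Mask{4}(0x0f), Mask{4}(0x05))
#
# yields a `Vec{4,UInt8}` of lanewise sums (2, 1, 2, 1), i.e. a per-lane count
# of how many of the two masks are on, not a boolean result.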
@inline Base.:(&)(m::AbstractMask{W,U}, b::Bool) where {W,U} =
Mask{W,U}(Core.ifelse(b, getfield(m, :u), zero(getfield(m, :u))))
@inline Base.:(&)(b::Bool, m::AbstractMask{W,U}) where {W,U} =
Mask{W,U}(Core.ifelse(b, getfield(m, :u), zero(getfield(m, :u))))
@inline Base.:(|)(m::AbstractMask{W,U}, b::Bool) where {W,U} =
Mask{W,U}(Core.ifelse(b, getfield(max_mask(Mask{W,U}), :u), getfield(m, :u)))
@inline Base.:(|)(b::Bool, m::AbstractMask{W,U}) where {W,U} =
Mask{W,U}(Core.ifelse(b, getfield(max_mask(Mask{W,U}), :u), getfield(m, :u)))
@inline function Base.:(&)(m::EVLMask{W,U}, b::Bool) where {W,U}
EVLMask{W,U}(
Core.ifelse(b, getfield(m, :u), zero(getfield(m, :u))),
Core.ifelse(b, getfield(m, :evl), 0x00000000)
)
end
@inline function Base.:(&)(b::Bool, m::EVLMask{W,U}) where {W,U}
EVLMask{W,U}(
Core.ifelse(b, getfield(m, :u), zero(getfield(m, :u))),
Core.ifelse(b, getfield(m, :evl), 0x00000000)
)
end
@inline function Base.:(|)(m::EVLMask{W,U}, b::Bool) where {W,U}
EVLMask{W,U}(
Core.ifelse(b, getfield(max_mask(Mask{W,U}), :u), getfield(m, :u)),
Core.ifelse(b, W % UInt32, getfield(m, :evl))
)
end
@inline function Base.:(|)(b::Bool, m::EVLMask{W,U}) where {W,U}
EVLMask{W,U}(
Core.ifelse(b, getfield(max_mask(Mask{W,U}), :u), getfield(m, :u)),
Core.ifelse(b, W % UInt32, getfield(m, :evl))
)
end
@inline Base.:(⊻)(m::AbstractMask{W,U}, b::Bool) where {W,U} =
Mask{W,U}(Core.ifelse(b, ~getfield(m, :u), getfield(m, :u)))
@inline Base.:(⊻)(b::Bool, m::AbstractMask{W,U}) where {W,U} =
Mask{W,U}(Core.ifelse(b, ~getfield(m, :u), getfield(m, :u)))
@inline vshl(m::AbstractMask{W,U}, i::IntegerTypesHW) where {W,U} =
Mask{W,U}(vshl(getfield(m, :u), i))
@inline vashr(m::AbstractMask{W,U}, i::IntegerTypesHW) where {W,U} =
Mask{W,U}(vashr(getfield(m, :u), i))
@inline vlshr(m::AbstractMask{W,U}, i::IntegerTypesHW) where {W,U} =
Mask{W,U}(vlshr(getfield(m, :u), i))
@inline zero_mask(::AbstractSIMDVector{W}) where {W} = Mask(zero_mask(Val(W)))
@inline zero_mask(::VecUnroll{N,W}) where {N,W} =
VecUnroll{N}(Mask(zero_mask(Val(W))))
@inline max_mask(::AbstractSIMDVector{W}) where {W} = Mask(max_mask(Val(W)))
@inline max_mask(::VecUnroll{N,W}) where {N,W} =
VecUnroll{N}(Mask(max_mask(Val(W))))
@inline zero_mask(::NativeTypes) = false
@inline max_mask(::NativeTypes) = true
@generated function sext(
::Type{Vec{W,I}},
m::AbstractMask{W,U}
) where {W,I<:IntegerTypesHW,U<:Union{UInt8,UInt16,UInt32,UInt64}}
bits = 8sizeof(I)
instrs = String[]
truncate_mask!(instrs, '0', W, 0)
push!(
instrs,
"%res = sext <$W x i1> %mask.0 to <$W x i$(bits)>\nret <$W x i$(bits)> %res"
)
gf = Expr(:call, GlobalRef(Core, :getfield), :m, 1, false)
llvmc = Expr(
:call,
GlobalRef(Base, :llvmcall),
join(instrs, "\n"),
:(_Vec{$W,$I}),
:(Tuple{$U}),
gf
)
Expr(:block, Expr(:meta, :inline), Expr(:call, :Vec, llvmc))
end
@inline function vany(m::AbstractMask)
_vany(m, has_feature(Val(:x86_64_avx512f)) | (!has_feature(Val(:x86_64_avx))))
end
@inline function _vany(m::Mask{8}, ::False)
x = reinterpret(Float32, sext(Vec{8,Int32}, m))
ccall(
"llvm.x86.avx.vtestz.ps.256",
llvmcall,
Int32,
(_Vec{8,Float32}, _Vec{8,Float32}),
data(x),
data(x)
) == 0
end
for (U, W) in [(UInt8, 8), (UInt16, 16), (UInt32, 32), (UInt64, 64)]
z = zero(U)
tm = typemax(U)
@eval @inline _vany(m::AbstractMask{$W,$U}, ::B) where {B} =
getfield(m, :u) != $z
@eval @inline vall(m::AbstractMask{$W,$U}) = getfield(m, :u) == $tm
end
# TODO: use vector reduction intrinsics
@inline function _vany(m::AbstractMask{W}, ::B) where {W,B}
mm = getfield(max_mask(Val{W}()), :u)
mu = getfield(m, :u)
(mu & mm) !== zero(mu)
end
@inline function vall(m::AbstractMask{W}) where {W}
mm = getfield(max_mask(Val{W}()), :u)
mu = getfield(m, :u)
mm & mu === mm
end
@inline vany(b::Bool) = b
@inline vall(b::Bool) = b
@inline vsum(m::AbstractMask) = count_ones(getfield(m, :u))
@inline vprod(m::AbstractMask) = vall(m)
@generated function vnot(m::AbstractMask{W,U}) where {W,U}
mtyp_input = "i$(8sizeof(U))"
mtyp_trunc = "i$(W)"
instrs = String[]
truncate_mask!(instrs, '0', W, 0)
mask = llvmconst(W, "i1 true")
push!(instrs, "%resvec.0 = xor <$W x i1> %mask.0, $mask")
zext_mask!(instrs, "resvec.0", W, 1)
push!(instrs, "ret $mtyp_input %res.1")
quote
$(Expr(:meta, :inline))
Mask{$W}($LLVMCALL($(join(instrs, "\n")), $U, Tuple{$U}, getfield(m, :u)))
end
end
@inline vnot(x::Bool) = Base.not_int(x)
# @inline Base.:(~)(m::Mask) = !m
@inline Base.count_ones(m::AbstractMask) = count_ones(getfield(m, :u))
@inline vcount_ones(m::AbstractMask) = count_ones(getfield(m, :u))
@inline vadd(m::AbstractMask, i::IntegerTypesHW) = i + count_ones(m)
@inline vadd(i::IntegerTypesHW, m::AbstractMask) = i + count_ones(m)
@generated function vzero(::Type{M}) where {W,M<:Mask{W}}
Expr(
:block,
Expr(:meta, :inline),
Expr(
:call,
Expr(:curly, :Mask, W),
Expr(:call, :zero, mask_type_symbol(W))
)
)
end
@generated function vzero(::Type{M}) where {W,M<:EVLMask{W}}
Expr(
:block,
Expr(:meta, :inline),
Expr(
:call,
Expr(:curly, :EVLMask, W),
Expr(:call, :zero, mask_type_symbol(W)),
0x00000000
)
)
end
@inline vzero(::Mask{W,U}) where {W,U} = Mask{W}(zero(U))
@inline vzero(::EVLMask{W,U}) where {W,U} = EVLMask{W}(zero(U), 0x00000000)
@inline Base.zero(::Type{M}) where {W,M<:AbstractMask{W}} = vzero(M)
@inline zero_mask(::Union{Val{W},StaticInt{W}}) where {W} =
EVLMask{W}(zero(VectorizationBase.mask_type(Val{W}())), 0x00000000)
@generated function max_mask(::Union{Val{W},StaticInt{W}}) where {W}
U = mask_type(W)
:(EVLMask{$W,$U}($(one(U) << W - one(U)), $(UInt32(W))))
end
@inline max_mask(::Type{T}) where {T} = max_mask(pick_vector_width(T))
@generated max_mask(::Type{Mask{W,U}}) where {W,U} =
EVLMask{W,U}(one(U) << W - one(U), W % UInt32)
@generated function valrem(
::Union{Val{W},StaticInt{W}},
l::T
) where {W,T<:Union{Integer,StaticInt}}
ex = ispow2(W) ? :(l & $(T(W - 1))) : Expr(:call, Base.urem_int, :l, T(W))
Expr(:block, Expr(:meta, :inline), ex)
end
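# Worked example: `valrem(Val(8), l)` lowers to `l & 7` since 8 is a power of
# two, while `valrem(Val(6), l)` falls back to the unsigned remainder
# `Base.urem_int(l, 6)`.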
function bzhi_quote(b)
T = b == 32 ? :UInt32 : :UInt64
typ = 'i' * string(b)
instr = "i$b @llvm.x86.bmi.bzhi.$b"
decl = "declare $instr(i$b, i$b) nounwind readnone"
instrs = "%res = call $instr(i$b %0, i$b %1)\n ret i$b %res"
llvmcall_expr(decl, instrs, T, :(Tuple{$T,$T}), typ, [typ, typ], [:a, :b])
end
@generated bzhi(a::UInt32, b::UInt32) = bzhi_quote(32)
@generated bzhi(a::UInt64, b::UInt64) = bzhi_quote(64)
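# `bzhi(a, b)` zeroes all bits of `a` at positions ≥ `b` (x86 BMI2's BZHI
# instruction). Worked example: `bzhi(0xffffffff, 0x00000005) == 0x0000001f`;
# the low five bits survive, which is exactly the payload of a length-5 EVL mask.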
# @generated function _mask(::Union{Val{W},StaticInt{W}}, l::I, ::True) where {W,I<:Union{Integer,StaticInt}}
# # if `has_opmask_registers()` then we can use bitmasks directly, so we create them via bittwiddling
# M = mask_type(W)
# quote # If the arch has opmask registers, we can generate a bitmask and then move it into the opmask register
# $(Expr(:meta,:inline))
# evl = valrem(Val{$W}(), (l % $M) - one($M))
# EVLMask{$W,$M}($(typemax(M)) >>> ($(M(8sizeof(M))-1) - evl), evl + one(evl))
# end
# end
@generated function _mask_bzhi(
::Union{Val{W},StaticInt{W}},
l::I
) where {W,I<:Union{Integer,StaticInt}}
U = mask_type_symbol(W)
T = W > 32 ? :UInt64 : :UInt32
quote
$(Expr(:meta, :inline))
m = valrem(StaticInt{$W}(), l % $T)
m = Core.ifelse((m % UInt8) == 0x00, $W % $T, m)
EVLMask{$W,$U}(bzhi(-1 % $T, m) % $U, m)
end
end
# @inline function _mask_bzhi(::Union{Val{W},StaticInt{W}}, l::I) where {W,I<:Union{Integer,StaticInt}}
# U = mask_type(StaticInt(W))
# # m = ((l) % UInt32) & ((W-1) % UInt32)
# m = valrem(StaticInt{W}(), l % UInt32)
# m = Core.ifelse((m % UInt8) == 0x00, W % UInt32, m)
# # m = Core.ifelse(zero(m) == m, -1 % UInt32, m)
# EVLMask{W,U}(bzhi(-1 % UInt32, m) % U, m)
# end
# @inline function _mask(::Union{Val{W},StaticInt{W}}, l::I, ::True) where {W,I<:Union{Integer,StaticInt}}
# U = mask_type(StaticInt(W))
# m = ((l-one(l)) % UInt32) & ((W-1) % UInt32)
# m += one(m)
# EVLMask{W,U}(bzhi(-1 % UInt32, m) % U, m)
# end
# @generated function _mask(::Union{Val{W},StaticInt{W}}, l::I, ::True) where {W,I<:Union{Integer,StaticInt}}
# M = mask_type_symbol(W)
# quote
# $(Expr(:meta,:inline))
# evl = valrem(Val{$W}(), vsub_nw((l % $M), one($M)))
# EVLMask{$W}(data(evl ≥ MM{$W}(0)), vadd_nw(evl, one(evl)))
# end
# end
function mask_shift_quote(W::Int, bmi::Bool)
if (((Sys.ARCH === :x86_64) || (Sys.ARCH === :i686))) &&
bmi &&
(W <= 64) &&
(ccall(:jl_generating_output, Cint, ()) != 1)
return Expr(:block, Expr(:meta, :inline), :(_mask_bzhi(StaticInt{$W}(), l)))
end
MT = mask_type(W)
quote # If the arch has opmask registers, we can generate a bitmask and then move it into the opmask register
$(Expr(:meta, :inline))
evl = valrem(Val{$W}(), (l % $MT) - one($MT))
EVLMask{$W,$MT}(
$(typemax(MT)) >>> ($(MT(8sizeof(MT)) - 1) - evl),
evl + one(evl)
)
end
end
@generated _mask_shift(::StaticInt{W}, l, ::True) where {W} =
mask_shift_quote(W, true)
@generated _mask_shift(::StaticInt{W}, l, ::False) where {W} =
mask_shift_quote(W, false)
@static if Base.libllvm_version ≥ v"12"
function active_lane_mask_quote(W::Int)
quote
$(Expr(:meta, :inline))
upper = (l % UInt32) & $((UInt32(W - 1)))
upper = Core.ifelse(upper == 0x00000000, $(W % UInt32), upper)
mask(Val{$W}(), 0x00000000, upper)
end
end
else
function active_lane_mask_quote(W::Int)
quote
$(Expr(:meta, :inline))
mask(
Val{$W}(),
0x00000000,
vsub_nw(l % UInt32, 0x00000001) & $(UInt32(W - 1))
)
end
end
end
function mask_cmp_quote(W::Int, RS::Int, bmi::Bool)
M = mask_type_symbol(W)
bytes = min(RS ÷ W, 8)
bytes < 4 && return mask_shift_quote(W, bmi)
T = integer_of_bytes_symbol(bytes, true)
quote
$(Expr(:meta, :inline))
evl = valrem(Val{$W}(), vsub_nw((l % $T), one($T)))
EVLMask{$W}(data(evl ≥ MM{$W}(zero($T))), vadd_nw(evl, one(evl)))
end
end
@generated _mask_cmp(
::Union{Val{W},StaticInt{W}},
l::I,
::StaticInt{RS},
::True
) where {W,RS,I<:Union{Integer,StaticInt}} = mask_cmp_quote(W, RS, true)
@generated _mask_cmp(
::Union{Val{W},StaticInt{W}},
l::I,
::StaticInt{RS},
::False
) where {W,RS,I<:Union{Integer,StaticInt}} = mask_cmp_quote(W, RS, false)
@generated _mask(
::Union{Val{W},StaticInt{W}},
l::I,
::True
) where {W,I<:Union{Integer,StaticInt}} = mask_shift_quote(W, true)
@generated function _mask(
::Union{Val{W},StaticInt{W}},
l::I,
::False
) where {W,I<:Union{Integer,StaticInt}}
# Otherwise, it's probably more efficient to use a comparison, as this will probably create some type that can be used directly for masked moves/blends/etc
if W > 16
Expr(
:block,
Expr(:meta, :inline),
:(_mask_shift(StaticInt{$W}(), l, has_feature(Val(:x86_64_bmi2))))
)
# mask_shift_quote(W)
# elseif (Base.libllvm_version ≥ v"11") && ispow2(W)
# elseif ((Sys.ARCH ≢ :x86_64) && (Sys.ARCH ≢ :i686)) && (Base.libllvm_version ≥ v"11") && ispow2(W)
elseif false
# cmpval = Base.libllvm_version ≥ v"12" ? -one(I) : zero(I)
active_lane_mask_quote(W)
else
Expr(
:block,
Expr(:meta, :inline),
:(_mask_cmp(
Val{$W}(),
l,
simd_integer_register_size(),
has_feature(Val(:x86_64_bmi2))
))
)
end
end
# This `mask` method returns a constant, independent of `has_opmask_registers()`; that only affects the method of
# calculating the constant, so it would be safe to bake in a value.
@inline mask(::Union{Val{W},StaticInt{W}}, L) where {W} = _mask(
StaticInt(W),
L,
has_feature(Val(:x86_64_avx512f)) & ge_one_fma(cpu_name())
)
@inline mask(::Union{Val{W},StaticInt{W}}, ::StaticInt{L}) where {W,L} = _mask(
StaticInt(W),
L,
has_feature(Val(:x86_64_avx512f)) & ge_one_fma(cpu_name())
)
@inline mask(::Type{T}, l::Union{Integer,StaticInt}) where {T} = _mask(
pick_vector_width(T),
l,
has_feature(Val(:x86_64_avx512f)) & ge_one_fma(cpu_name())
)
# @generated function masktable(::Union{Val{W},StaticInt{W}}, rem::Union{Integer,StaticInt}) where {W}
# masks = Expr(:tuple)
# for w ∈ 0:W-1
# push!(masks.args, data(mask(Val(W), w == 0 ? W : w)))
# end
# Expr(
# :block,
# Expr(:meta,:inline),
# Expr(:call, Expr(:curly, :Mask, W), Expr(
# :macrocall, Symbol("@inbounds"), LineNumberNode(@__LINE__, Symbol(@__FILE__)),
# Expr(:call, :getindex, masks, Expr(:call, :+, 1, Expr(:call, :valrem, Expr(:call, Expr(:curly, W)), :rem)))
# ))
# )
# end
@inline tomask(m::VecUnroll) = VecUnroll(fmap(tomask, data(m)))
@inline tomask(m::Unsigned) = Mask{8sizeof(m)}(m)
@inline tomask(m::Mask) = m
@generated function tomask(v::Vec{W,Bool}) where {W}
usize = W > 8 ? nextpow2(W) : 8
utyp = "i$(usize)"
U = mask_type_symbol(W)
instrs = String[]
push!(instrs, "%bitvec = trunc <$W x i8> %0 to <$W x i1>")
zext_mask!(instrs, "bitvec", W, 0)
push!(instrs, "ret i$(usize) %res.0")
quote
$(Expr(:meta, :inline))
Mask{$W}(
$LLVMCALL($(join(instrs, "\n")), $U, Tuple{_Vec{$W,Bool}}, data(v))
)
end
end
@inline tomask(v::AbstractSIMDVector{W,Bool}) where {W} =
tomask(vconvert(Vec{W,Bool}, data(v)))
# @inline tounsigned(m::Mask) = getfield(m, :u)
# @inline tounsigned(m::Vec{W,Bool}) where {W} = getfield(tomask(m), :u)
@inline tounsigned(v) = getfield(tomask(v), :u)
@generated function vrem(m::Mask{W,U}, ::Type{I}) where {W,U,I<:IntegerTypesHW}
bits = 8sizeof(I)
instrs = String[]
truncate_mask!(instrs, '0', W, 0)
push!(
instrs,
"%res = zext <$W x i1> %mask.0 to <$W x i$(bits)>\nret <$W x i$(bits)> %res"
)
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($(join(instrs, "\n")), _Vec{$W,$I}, Tuple{$U}, data(m)))
end
end
Vec(m::Mask{W}) where {W} = m % int_type(Val{W}())
# @inline getindexzerobased(m::Mask, i) = (getfield(m, :u) >>> i) % Bool
# @inline function extractelement(m::Mask{W}, i::Union{Integer,StaticInt}) where {W}
# @boundscheck i > W && throw(BoundsError(m, i))
# getindexzerobased(m, i)
# end
@generated function extractelement(v::Mask{W,U}, i::I) where {W,U,I}
instrs = String[]
truncate_mask!(instrs, '0', W, 0)
push!(instrs, "%res1 = extractelement <$W x i1> %mask.0, i$(8sizeof(I)) %1")
push!(instrs, "%res8 = zext i1 %res1 to i8\nret i8 %res8")
instrs_string = join(instrs, "\n")
call = :($LLVMCALL($instrs_string, Bool, Tuple{$U,$I}, data(v), i))
Expr(:block, Expr(:meta, :inline), call)
end
@generated function insertelement(
v::Mask{W,U},
x::T,
i::I
) where {W,T,U,I<:Union{Bool,IntegerTypesHW}}
mtyp_input = "i$(max(8,nextpow2(W)))"
instrs = String["%bit = trunc i$(8sizeof(T)) %1 to i1"]
truncate_mask!(instrs, '0', W, 0)
push!(
instrs,
"%bitvec = insertelement <$W x i1> %mask.0, i1 %bit, i$(8sizeof(I)) %2"
)
zext_mask!(instrs, "bitvec", W, 1)
push!(instrs, "ret $(mtyp_input) %res.1")
instrs_string = join(instrs, "\n")
call =
:(Mask{$W}($LLVMCALL($instrs_string, $U, Tuple{$U,$T,$I}, data(v), x, i)))
Expr(:block, Expr(:meta, :inline), call)
end
# @generated function Base.isodd(i::MM{W,1}) where {W}
# U = mask_type(W)
# evenfirst = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa % U
# # Expr(:block, Expr(:meta, :inline), :(isodd(getfield(i, :i)) ? Mask{$W}($oddfirst) : Mask{$W}($evenfirst)))
# Expr(:block, Expr(:meta, :inline), :(Mask{$W}($evenfirst >> (getfield(i, :i) & 0x03))))
# end
# @generated function Base.iseven(i::MM{W,1}) where {W}
# U = mask_type(W)
# oddfirst = 0x55555555555555555555555555555555 % U
# # evenfirst = oddfirst << 1
# # Expr(:block, Expr(:meta, :inline), :(isodd(getfield(i, :i)) ? Mask{$W}($evenfirst) : Mask{$W}($oddfirst)))
# Expr(:block, Expr(:meta, :inline), :(Mask{$W}($oddfirst >> (getfield(i, :i) & 0x03))))
# end
@inline Base.isodd(i::MM{W,1}) where {W} = Mask{W}(
(0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa % mask_type(Val{W}())) >>>
(getfield(i, :i) & 0x01)
)
@inline Base.iseven(i::MM{W,1}) where {W} = Mask{W}(
(0x55555555555555555555555555555555 % mask_type(Val{W}())) <<
(getfield(i, :i) & 0x01)
)
@inline Base.isodd(m::AbstractMask) = m
@inline Base.iseven(m::AbstractMask) = !m
@inline Base.isodd(m::VecUnroll{<:Any,<:Any,Bit,<:AbstractMask}) = m
@inline Base.iseven(m::VecUnroll{<:Any,<:Any,Bit,<:AbstractMask}) = !m
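# Worked example: `isodd(MM{4}(2))` covers the values (2, 3, 4, 5); the
# alternating pattern `0b...1010` is shifted right by `2 & 0x01 == 0`, giving
# the mask <0, 1, 0, 1>, i.e. the lanes holding the odd values 3 and 5. An odd
# base shifts the pattern by one lane instead.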
function cmp_quote(W, cond, vtyp, T1, T2 = T1)
instrs = String["%m = $cond $vtyp %0, %1"]
zext_mask!(instrs, 'm', W, '0')
push!(instrs, "ret i$(max(8,nextpow2(W))) %res.0")
U = mask_type_symbol(W)
quote
$(Expr(:meta, :inline))
Mask{$W}(
$LLVMCALL(
$(join(instrs, "\n")),
$U,
Tuple{_Vec{$W,$T1},_Vec{$W,$T2}},
data(v1),
data(v2)
)
)
end
end
function icmp_quote(W, cond, bytes, T1, T2 = T1)
vtyp = vtype(W, "i$(8bytes)")
cmp_quote(W, "icmp " * cond, vtyp, T1, T2)
end
function fcmp_quote(W, cond, T)
vtyp = vtype(W, T === Float32 ? "float" : "double")
cmp_quote(W, "fcmp nsz arcp contract reassoc " * cond, vtyp, T)
end
# @generated function compare(::Val{cond}, v1::Vec{W,I}, v2::Vec{W,I}) where {cond, W, I}
# cmp_quote(W, cond, sizeof(I), I)
# end
# for (f,cond) ∈ [(:(==), :eq), (:(!=), :ne), (:(>), :ugt), (:(≥), :uge), (:(<), :ult), (:(≤), :ule)]
for (f, cond) ∈ [(:veq, "eq"), (:vne, "ne")]
@eval @generated function $f(
v1::Vec{W,T1},
v2::Vec{W,T2}
) where {W,T1<:IntegerTypesHW,T2<:IntegerTypesHW}
if sizeof(T1) != sizeof(T2)
return Expr(
:block,
Expr(:meta, :inline),
:((v3, v4) = promote(v1, v2)),
Expr(:call, $f, :v3, :v4)
)
end
icmp_quote(W, $cond, sizeof(T1), T1, T2)
end
end
for (f, cond) ∈ [(:vgt, "ugt"), (:vge, "uge"), (:vlt, "ult"), (:vle, "ule")]
@eval @generated function $f(v1::Vec{W,T}, v2::Vec{W,T}) where {W,T<:Unsigned}
icmp_quote(W, $cond, sizeof(T), T)
end
end
for (f, cond) ∈ [(:vgt, "sgt"), (:vge, "sge"), (:vlt, "slt"), (:vle, "sle")]
@eval @generated function $f(v1::Vec{W,T}, v2::Vec{W,T}) where {W,T<:Signed}
icmp_quote(W, $cond, sizeof(T), T)
end
end
# for (f,cond) ∈ [(:veq, "oeq"), (:vgt, "ogt"), (:vge, "oge"), (:vlt, "olt"), (:vle, "ole"), (:vne, "one")]
for (f, cond) ∈ [
(:veq, "oeq"),
(:vgt, "ogt"),
(:vge, "oge"),
(:vlt, "olt"),
(:vle, "ole"),
(:vne, "une")
]
# for (f,cond) ∈ [(:veq, "ueq"), (:vgt, "ugt"), (:vge, "uge"), (:vlt, "ult"), (:vle, "ule"), (:vne, "une")]
@eval @generated function $f(
v1::Vec{W,T},
v2::Vec{W,T}
) where {W,T<:Union{Float32,Float64}}
fcmp_quote(W, $cond, T)
end
end
@inline function vgt(
v1::AbstractSIMDVector{W,S},
v2::AbstractSIMDVector{W,U}
) where {W,S<:SignedHW,U<:UnsignedHW}
(v1 > zero(S)) & (vconvert(U, v1) > v2)
end
@inline function vgt(
v1::AbstractSIMDVector{W,U},
v2::AbstractSIMDVector{W,S}
) where {W,S<:SignedHW,U<:UnsignedHW}
(v2 < zero(S)) | (vconvert(S, v1) > v2)
end
@inline function vge(
v1::AbstractSIMDVector{W,S},
v2::AbstractSIMDVector{W,U}
) where {W,S<:SignedHW,U<:UnsignedHW}
(v1 ≥ zero(S)) & (vconvert(U, v1) ≥ v2)
end
@inline function vge(
v1::AbstractSIMDVector{W,U},
v2::AbstractSIMDVector{W,S}
) where {W,S<:SignedHW,U<:UnsignedHW}
(v2 < zero(S)) | (vconvert(S, v1) ≥ v2)
end
@inline vlt(
v1::AbstractSIMDVector{W,S},
v2::AbstractSIMDVector{W,U}
) where {W,S<:SignedHW,U<:UnsignedHW} = vgt(v2, v1)
@inline vlt(
v1::AbstractSIMDVector{W,U},
v2::AbstractSIMDVector{W,S}
) where {W,S<:SignedHW,U<:UnsignedHW} = vgt(v2, v1)
@inline vle(
v1::AbstractSIMDVector{W,S},
v2::AbstractSIMDVector{W,U}
) where {W,S<:SignedHW,U<:UnsignedHW} = vge(v2, v1)
@inline vle(
v1::AbstractSIMDVector{W,U},
v2::AbstractSIMDVector{W,S}
) where {W,S<:SignedHW,U<:UnsignedHW} = vge(v2, v1)
for (op, f) ∈ [(:vgt, :(>)), (:vge, :(≥)), (:vlt, :(<)), (:vle, :(≤))]
@eval begin
@inline function $op(
v1::V1,
v2::V2
) where {
V1<:Union{IntegerTypesHW,AbstractSIMDVector{<:Any,<:IntegerTypesHW}},
V2<:Union{IntegerTypesHW,AbstractSIMDVector{<:Any,<:IntegerTypesHW}}
}
V3 = promote_type(V1, V2)
$op(itosize(v1, V3), itosize(v2, V3))
end
@inline function $op(v1, v2)
v3, v4 = promote(v1, v2)
$op(v3, v4)
end
@inline $op(s1::IntegerTypesHW, s2::IntegerTypesHW) = $f(s1, s2)
@inline $op(s1::Union{Float32,Float64}, s2::Union{Float32,Float64}) =
$f(s1, s2)
end
end
for (op, f) ∈ [(:veq, :(==)), (:vne, :(≠))]
@eval begin
@inline $op(a, b) = ((c, d) = promote(a, b); $op(c, d))
@inline $op(s1::NativeTypes, s2::NativeTypes) = $f(s1, s2)
end
end
@generated function vifelse(
m::AbstractMask{W,U},
v1::Vec{W,T},
v2::Vec{W,T}
) where {W,U,T}
typ = LLVM_TYPES[T]
vtyp = vtype(W, typ)
selty = vtype(W, "i1")
f = "select"
if Base.libllvm_version ≥ v"9" && ((T === Float32) || (T === Float64))
f *= " nsz arcp contract reassoc"
end
instrs = String[]
truncate_mask!(instrs, '0', W, 0)
push!(instrs, "%res = $f $selty %mask.0, $vtyp %1, $vtyp %2\nret $vtyp %res")
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$(join(instrs, "\n")),
_Vec{$W,$T},
Tuple{$U,_Vec{$W,$T},_Vec{$W,$T}},
data(m),
data(v1),
data(v2)
)
)
end
end
@inline vifelse(m::Vec{W,Bool}, s1::T, s2::T) where {W,T<:NativeTypes} =
vifelse(m, Vec{W,T}(s1), Vec{W,T}(s2))
@inline function vifelse(
m::AbstractMask{W},
s1::T,
s2::T
) where {W,T<:NativeTypes}
vifelse(m, Vec{W,T}(s1), Vec{W,T}(s2))
end
@inline vifelse(m::AbstractMask{W,U}, s1, s2) where {W,U} =
((x1, x2) = promote(s1, s2); vifelse(m, x1, x2))
@inline vifelse(
m::AbstractMask{W},
v1::VecUnroll{N,W},
v2::VecUnroll{N,W}
) where {N,W} =
VecUnroll(fmap(vifelse, m, getfield(v1, :data), getfield(v2, :data)))
@inline vifelse(m::AbstractMask, a::MM, b::MM) = vifelse(m, Vec(a), Vec(b))
@inline Base.Bool(m::AbstractMask{1,UInt8}) = (getfield(m, :u) & 0x01) === 0x01
@inline vconvert(::Type{Bool}, m::AbstractMask{1,UInt8}) =
(getfield(m, :u) & 0x01) === 0x01
@inline vifelse(m::AbstractMask{1}, s1::T, s2::T) where {T<:NativeTypes} =
Base.ifelse(Bool(m), s1, s2)
@inline vifelse(
f::F,
m::AbstractSIMD{W,B},
a::Vararg{NativeTypesV,K}
) where {F<:Function,K,W,B<:Union{Bool,Bit}} = vifelse(m, f(a...), a[K])
@inline vifelse(
f::F,
m::Bool,
a::Vararg{NativeTypesV,K}
) where {F<:Function,K} = ifelse(m, f(a...), a[K])
@inline vconvert(::Type{EVLMask{W,U}}, b::Bool) where {W,U} =
b & max_mask(StaticInt{W}())
@inline vifelse(
m::AbstractMask{W},
a::AbstractMask{W},
b::AbstractMask{W}
) where {W} = bitselect(m, b, a)
@inline Base.isnan(v::AbstractSIMD) = v != v
@inline Base.isfinite(x::AbstractSIMD) = iszero(x - x)
@inline Base.flipsign(x::AbstractSIMD, y::AbstractSIMD) =
vifelse(y > zero(y), x, -x)
for T ∈ [:Float32, :Float64]
@eval begin
@inline Base.flipsign(x::AbstractSIMD, y::$T) = vifelse(y > zero(y), x, -x)
@inline Base.flipsign(x::$T, y::AbstractSIMD) = vifelse(y > zero(y), x, -x)
end
end
@inline Base.flipsign(x::AbstractSIMD, y::Real) = ifelse(y > zero(y), x, -x)
@inline Base.flipsign(x::Real, y::AbstractSIMD) = ifelse(y > zero(y), x, -x)
@inline Base.flipsign(x::Signed, y::AbstractSIMD) = ifelse(y > zero(y), x, -x)
@inline Base.isodd(x::AbstractSIMD{W,T}) where {W,T<:Union{Integer,StaticInt}} =
(x & one(T)) != zero(T)
@inline Base.iseven(
x::AbstractSIMD{W,T}
) where {W,T<:Union{Integer,StaticInt}} = (x & one(T)) == zero(T)
@generated function vifelse(
m::Vec{W,Bool},
v1::Vec{W,T},
v2::Vec{W,T}
) where {W,T}
typ = LLVM_TYPES[T]
vtyp = vtype(W, typ)
selty = vtype(W, "i1")
f = "select"
if Base.libllvm_version ≥ v"9" && ((T === Float32) || (T === Float64))
f *= " nsz arcp contract reassoc"
end
instrs = String["%mask.0 = trunc <$W x i8> %0 to <$W x i1>"]
push!(instrs, "%res = $f $selty %mask.0, $vtyp %1, $vtyp %2\nret $vtyp %res")
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$(join(instrs, "\n")),
_Vec{$W,$T},
Tuple{_Vec{$W,Bool},_Vec{$W,$T},_Vec{$W,$T}},
data(m),
data(v1),
data(v2)
)
)
end
end
@inline vifelse(b::Bool, w, x) = ((y, z) = promote(w, x); vifelse(b, y, z))
@inline vifelse(
b::Bool,
w::T,
x::T
) where {T<:Union{NativeTypes,AbstractSIMDVector}} = Core.ifelse(b, w, x)
@inline vifelse(b::Bool, w::T, x::T) where {T<:VecUnroll} =
VecUnroll(fmap(Core.ifelse, b, getfield(w, :data), getfield(x, :data)))
@generated function vifelse(
m::AbstractMask{W},
vu1::VecUnroll{Nm1,Wsplit},
vu2::VecUnroll{Nm1,Wsplit}
) where {W,Wsplit,Nm1}
N = Nm1 + 1
@assert N * Wsplit == W
U = mask_type_symbol(Wsplit)
quote
$(Expr(:meta, :inline))
vifelse(vconvert(VecUnroll{$Nm1,$Wsplit,Bit,Mask{$Wsplit,$U}}, m), vu1, vu2)
end
end
@inline vmul(v::AbstractSIMDVector, m::AbstractMask) = vifelse(m, v, zero(v))
@inline vmul(m::AbstractMask, v::AbstractSIMDVector) = vifelse(m, v, zero(v))
@inline vmul(m1::AbstractMask, m2::AbstractMask) = m1 & m2
@inline vmul(v::AbstractSIMDVector, b::Bool) = b ? v : zero(v)
@inline vmul(b::Bool, v::AbstractSIMDVector) = b ? v : zero(v)
@inline vmul(v::VecUnroll{N,W,T}, b::Bool) where {N,W,T} = b ? v : zero(v)
@inline vmul(b::Bool, v::VecUnroll{N,W,T}) where {N,W,T} = b ? v : zero(v)
@inline vmul_fast(m1::Mask{W,U}, m2::Mask{W,U}) where {W,U} = m1 & m2
@static if Base.libllvm_version ≥ v"11"
"""
mask(::Union{StaticInt{W},Val{W}}, base, N)
mask(base::MM{W}, N)
Both methods take a `base` (the current loop index) and `N`, the last index of the loop.
Idiomatic use of the three-arg version may look like
```julia
using VectorizationBase
sp = stridedpointer(x);
for i ∈ 1:8:N
  m = mask(Val(8), i, N) # an integer base also needs a `Val` or `StaticInt` to indicate the width.
v = vload(sp, (MM{8}(i),), m)
# do something with `v`
end
```
or, a full runnable example:
```julia
using VectorizationBase, SLEEFPirates
x = randn(117); y = similar(x);
function vexp!(y, x)
W = VectorizationBase.pick_vector_width(eltype(x));
L = length(y);
spx = stridedpointer(x); spy = stridedpointer(y);
i = MM(W, 1); # use an `MM` index.
while (m = mask(i,L); m !== VectorizationBase.zero_mask(W))
yᵢ = exp(vload(spx, (i,), m))
vstore!(spy, yᵢ, (i,), m)
i += W
end
end
vexp!(y, x)
@assert y ≈ exp.(x)
# A sum optimized for short vectors (e.g., 10-20 elements)
function simd_sum(x)
W = VectorizationBase.pick_vector_width(eltype(x));
L = length(x);
spx = stridedpointer(x);
i = MM(W, 1); # use an `MM` index.
s = VectorizationBase.vzero(W, eltype(x))
while (m = mask(i,L); m !== VectorizationBase.zero_mask(W))
s += vload(spx, (i,), m)
i += W
end
VectorizationBase.vsum(s)
end
# or
function simd_sum(x)
W = VectorizationBase.pick_vector_width(eltype(x));
L = length(x);
spx = stridedpointer(x);
i = MM(W, 1); # use an `MM` index.
s = VectorizationBase.vzero(W, eltype(x))
cond = true
m = mask(i,L)
while cond
s += vload(spx, (i,), m)
i += W
m = mask(i,L)
cond = m !== VectorizationBase.zero_mask(W)
end
VectorizationBase.vsum(s)
end
```
```julia
julia> VectorizationBase.mask(Val(8), 1, 6) # starting with `i = 1`, if vector is of length 6, 6 lanes are on
Mask{8,Bool}<1, 1, 1, 1, 1, 1, 0, 0>
julia> VectorizationBase.mask(Val(8), 81, 93) # if `i = 81` and the vector is of length 93, we want all lanes on.
Mask{8,Bool}<1, 1, 1, 1, 1, 1, 1, 1>
julia> VectorizationBase.mask(Val(8), 89, 93) # But after `i += 8`, we're at `i = 89`, and now want just 5 lanes on.
Mask{8,Bool}<1, 1, 1, 1, 1, 0, 0, 0>
```
"""
@generated function mask(
::Union{Val{W},StaticInt{W}},
base::T,
N::T
) where {W,T<:IntegerTypesHW}
# declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 %base, i64 %n)
bits = 8sizeof(T)
typ = "i$(bits)"
decl = "declare <$W x i1> @llvm.get.active.lane.mask.v$(W)i1.$(typ)($(typ), $(typ))"
instrs = [
"%m = call <$W x i1> @llvm.get.active.lane.mask.v$(W)i1.$(typ)($(typ) %0, $(typ) %1)"
]
zext_mask!(instrs, 'm', W, 0)
push!(instrs, "ret i$(max(nextpow2(W),8)) %res.0")
args = [:base, :N]
call = llvmcall_expr(
decl,
join(instrs, "\n"),
mask_type_symbol(W),
:(Tuple{$T,$T}),
"i$(max(nextpow2(W),8))",
[typ, typ],
args,
true
)
Expr(
:block,
Expr(:meta, :inline),
:(EVLMask{$W}($call, ((N % UInt32) - (base % UInt32)) + 0x00000001))
)
end
@inline mask(i::MM{W}, N::T) where {W,T<:IntegerTypesHW} =
mask(Val{W}(), getfield(i, :i), N)
end
@generated function vcat(
m1::AbstractMask{_W1},
m2::AbstractMask{_W2}
) where {_W1,_W2}
if _W1 == _W2
W = _W1
else
W = _W1 + _W2
U = integer_of_bytes_symbol(W, true)
return Expr(
:block,
Expr(:meta, :inline),
:(Mask{$W}(((data(m1) % $U) << $_W2) | (data(m2) % $U)))
)
end
mtyp_input = "i$(max(8,nextpow2(W)))"
instrs = String[]
truncate_mask!(instrs, '0', W, 0)
truncate_mask!(instrs, '1', W, 1)
W2 = W + W
shuffmask = Vector{String}(undef, W2)
for w ∈ eachindex(shuffmask)
shuffmask[w] = string(w - 1)
end
mask = '<' * join(map(x -> string("i32 ", x), shuffmask), ", ") * '>'
push!(
instrs,
"%combinedmask = shufflevector <$W x i1> %mask.0, <$W x i1> %mask.1, <$(W2) x i32> $mask"
)
mtyp_output = "i$(max(8,nextpow2(W2)))"
zext_mask!(instrs, "combinedmask", W2, 1)
push!(instrs, "ret $mtyp_output %res.1")
instrj = join(instrs, "\n")
U = mask_type_symbol(W)
U2 = mask_type_symbol(W2)
Expr(
:block,
Expr(:meta, :inline),
:(Mask{$W2}(
$LLVMCALL($instrj, $U2, Tuple{$U,$U}, getfield(m1, :u), getfield(m2, :u))
))
)
end
# @inline function vcat(m1::AbstractMask{W}, m2::AbstractMask{W}) where {W}
# U = mask_type(Val(W))
# u1 = data(m1) % U
# u2 = data(m2) % U
# (u1 << W) | u2
# end
@inline ifelse(b::Bool, m1::Mask{W}, m2::Mask{W}) where {W} =
Mask{W}(Core.ifelse(b, getfield(m1, :u), getfield(m2, :u)))
@inline ifelse(b::Bool, m1::Mask{W}, m2::EVLMask{W}) where {W} =
Mask{W}(Core.ifelse(b, getfield(m1, :u), getfield(m2, :u)))
@inline ifelse(b::Bool, m1::EVLMask{W}, m2::Mask{W}) where {W} =
Mask{W}(Core.ifelse(b, getfield(m1, :u), getfield(m2, :u)))
@inline ifelse(b::Bool, m1::EVLMask{W}, m2::EVLMask{W}) where {W} = EVLMask{W}(
Core.ifelse(b, getfield(m1, :u), getfield(m2, :u)),
Core.ifelse(b, getfield(m1, :evl), getfield(m2, :evl))
)
@inline vconvert(::Type{<:AbstractMask{W}}, b::Bool) where {W} =
b ? max_mask(Val(W)) : zero_mask(Val(W))
@inline vconvert(::Type{Mask{W}}, b::Bool) where {W} =
b ? max_mask(Val(W)) : zero_mask(Val(W))
@inline vconvert(::Type{EVLMask{W}}, b::Bool) where {W} =
b ? max_mask(Val(W)) : zero_mask(Val(W))
@inline Base.max(x::AbstractMask, y::AbstractMask) = x | y
@inline Base.min(x::AbstractMask, y::AbstractMask) = x & y
@inline Base.FastMath.max_fast(x::AbstractMask, y::AbstractMask) = x | y
@inline Base.FastMath.min_fast(x::AbstractMask, y::AbstractMask) = x & y
@inline zero_mask(::Type{T}) where {T} = zero_mask(zero(T))
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 60264 | ####################################################################################################
###################################### Memory Addressing ###########################################
####################################################################################################
# Operation names and dispatch pipeline:
# `vload` and `vstore!` are the user API functions. They load from an `AbstractStridedPointer` and
# are indexed via a `::Tuple` or `Unroll{AU,F,N,AV,W,M,X,<:Tuple}`.
# These calls are forwarded to `_vload` and `_vstore!`, appending information like `register_size()`,
# whether the operations can be assumed to be aligned, and for `vstore!` whether to add alias scope
# metadata and a nontemporal hint (non-temporal requires alignment).
#
# The tuple (Cartesian) indices are then linearized and put in terms of the number of bytes, and
# forwarded to `__vload` and `__vstore!`.
#
# The name mangling was introduced to help with discoverability, and to make the dispatch chain clearer.
# `methods(vload)` and `methods(vstore!)` now return far fewer methods, so users have an easier time
# assessing the API.
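# Hedged usage sketch of the entry points (array and indices are hypothetical):
#
#   x  = rand(Float64, 16);
#   sp = stridedpointer(x);
#   v  = vload(sp, (MM{4}(1),))          # x[1:4] as a Vec{4,Float64}
#   vstore!(sp, v + 1.0, (MM{4}(5),))    # store into x[5:8]
#
# Both calls funnel through `_vload`/`_vstore!` (index linearization) down to
# `__vload`/`__vstore!` (llvmcall code generation).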
"""
Unroll{AU,F,N,AV,W,M,X}(i::I)
- AU: Unrolled axis
- F: Factor, step size per unroll. If `AU == AV`, `F == W` means successive loads. `1` would mean offset by `1`, e.g. `x[1:8]`, `x[2:9]`, and `x[3:10]`.
- N: How many times it is unrolled
- AV: Vectorized axis (`0` means not vectorized, i.e. some sort of reduction)
- W: vector width
- M: bitmask indicating whether each factor is masked
- X: stride between loads of vectors along axis `AV`.
- i::I - index
"""
struct Unroll{AU,F,N,AV,W,M,X,I}
i::I
end
@inline Unroll{AU,F,N,AV,W}(i::I) where {AU,F,N,AV,W,I} =
Unroll{AU,F,N,AV,W,zero(UInt),1,I}(i)
@inline Unroll{AU,F,N,AV,W,M}(i::I) where {AU,F,N,AV,W,M,I} =
Unroll{AU,F,N,AV,W,M,1,I}(i)
@inline Unroll{AU,F,N,AV,W,M,X}(i::I) where {AU,F,N,AV,W,M,X,I} =
Unroll{AU,F,N,AV,W,M,X,I}(i)
@inline data(u::Unroll) = getfield(u, :i)
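# Hedged example of an `Unroll` index (values are hypothetical):
#
#   Unroll{1,8,4,1,8}((MM{8}(1),))
#
# unrolls along axis 1 (`AU = 1`) in steps of 8 (`F == W`, successive loads),
# 4 times (`N = 4`), vectorized along axis 1 (`AV = 1`) with width `W = 8`;
# the defaults `M = zero(UInt)` (no load masked) and `X = 1` are filled in by
# the constructors above.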
const TupleIndex =
Union{Tuple,Unroll{<:Any,<:Any,<:Any,<:Any,<:Any,<:Any,<:Any,<:Tuple}}
@inline function linear_index(
ptr::AbstractStridedPointer,
u::Unroll{AU,F,N,AV,W,M,X,I}
) where {AU,F,N,AV,W,M,X,I<:TupleIndex}
p, i = linear_index(ptr, data(u))
# Unroll{AU,F,N,AV,W,M,typeof(i)}(i)
p, Unroll{AU,F,N,AV,W,M,X}(i)
end
unroll_params(::Type{Unroll{AU,F,N,AV,W,M,X,I}}) where {AU,F,N,AV,W,M,X,I} =
(AU, F, N, AV, W, M, X, I)
const NestedUnroll{W,AV,X,I,AUO,FO,NO,MO,AUI,FI,NI,MI} =
Unroll{AUO,FO,NO,AV,W,MO,X,Unroll{AUI,FI,NI,AV,W,MI,X,I}}
const VectorIndexCore{W} = Union{Vec{W},MM{W},Unroll{<:Any,<:Any,<:Any,<:Any,W}}
const VectorIndex{W} =
Union{VectorIndexCore{W},LazyMulAdd{<:Any,<:Any,<:VectorIndexCore{W}}}
const IntegerIndex = Union{IntegerTypes,LazyMulAdd{<:Any,<:Any,<:IntegerTypes}}
const Index = Union{IntegerIndex,VectorIndex}
const VectorIndexNoUnrollCore{W} = Union{Vec{W},MM{W}}
const VectorIndexNoUnroll{W} = Union{
VectorIndexNoUnrollCore{W},
LazyMulAdd{<:Any,<:Any,<:VectorIndexNoUnrollCore{W}}
}
const IndexNoUnroll = Union{IntegerIndex,VectorIndexNoUnroll}
# const BoolVec = Union{AbstractMask,VecUnroll{<:Any, <:Any, Bool, <: AbstractMask}}
const SCOPE_METADATA = """
!1 = !{!\"noaliasdomain\"}
!2 = !{!\"noaliasscope\", !1}
!3 = !{!2}
"""
const LOAD_SCOPE_FLAGS = ", !alias.scope !3";
const STORE_SCOPE_FLAGS = ", !noalias !3";
# use TBAA?
# const TBAA_STR = """
# !4 = !{!5, !5, i64 0}
# !5 = !{!"jtbaa_mutab", !6, i64 0}
# !6 = !{!"jtbaa_value", !7, i64 0}
# !7 = !{!"jtbaa_data", !8, i64 0}
# !8 = !{!"jtbaa", !9, i64 0}
# !9 = !{!"jtbaa"}
# """;
# const TBAA_FLAGS = ", !tbaa !4";
const TBAA_STR = const TBAA_FLAGS = ""
# const TBAA_STR = """
# !4 = !{!"jtbaa", !5, i64 0}
# !5 = !{!"jtbaa"}
# !6 = !{!"jtbaa_data", !4, i64 0}
# !7 = !{!8, !8, i64 0}
# !8 = !{!"jtbaa_arraybuf", !6, i64 0}
# """;
# const TBAA_FLAGS = ", !tbaa !7";
const LOAD_SCOPE_TBAA = SCOPE_METADATA * TBAA_STR
const LOAD_SCOPE_TBAA_FLAGS = LOAD_SCOPE_FLAGS * TBAA_FLAGS
"""
An omnibus offset constructor.
The general motivation for generating the memory addresses as LLVM IR rather than combining multiple llvmcall Julia functions is
that we want to minimize the `inttoptr` and `ptrtoint` calculations as we go back and forth. These can get in the way of some
optimizations, such as memory address calculations.
It is particularly important for `gather`s and `scatter`s, as these functions take a `Vec{W,Ptr{T}}` argument to load/store a
`Vec{W,T}` to/from. If `sizeof(T) < sizeof(Int)`, converting the `<W x \$(typ)*>` vectors of pointers in LLVM to integer
vectors as they're represented in Julia will likely make them too large to fit in a single register, splitting the operation
into multiple operations, forcing a corresponding split of the `Vec{W,T}` vector as well.
This would all be avoided by not promoting/widening the `<W x \$(typ)>` into a vector of `Int`s.
For this last issue, an alternate workaround would be to wrap a `Vec` of 32-bit integers with a type that defines it as a pointer for use with
internal llvmcall functions, but I haven't really explored this optimization.
"""
function offset_ptr(
T_sym::Symbol,
ind_type::Symbol,
indargname::Char,
ibits::Int,
W::Int,
X::Int,
M::Int,
O::Int,
forgep::Bool,
rs::Int
)
sizeof_T = JULIA_TYPE_SIZE[T_sym]
i = 0
Morig = M
isbit = T_sym === :Bit
if isbit
typ = "i1"
vtyp = isone(W) ? typ : "<$W x i1>"
M = max(1, M >> 3)
O >>= 3
if !((isone(X) | iszero(X)) && (ind_type !== :Vec))
throw(
ArgumentError(
"indexing BitArrays with a vector not currently supported."
)
)
end
else
typ = LLVM_TYPES_SYM[T_sym]
vtyp = vtype(W, typ) # vtyp is dest type
end
instrs = String[]
if M == 0
ind_type = :StaticInt
elseif ind_type === :StaticInt
M = 0
end
if isone(W)
X = 1
end
if iszero(M)
tz = intlog2(sizeof_T)
tzf = sizeof_T
index_gep_typ = typ
else
tz = min(trailing_zeros(M), 3)
tzf = 1 << tz
index_gep_typ = ((tzf == sizeof_T) | iszero(M)) ? typ : "i$(tzf << 3)"
M >>= tz
end
# after this block, we will have a index_gep_typ pointer
if iszero(O)
push!(
instrs,
"%ptr.$(i) = inttoptr $(JULIAPOINTERTYPE) %0 to $(index_gep_typ)*"
)
i += 1
else # !iszero(O)
    if !iszero(O & (tzf - 1)) # the constant offset isn't a multiple of the gep element size, so fall back to byte-granular offsets
      offset_gep_typ = "i8"
      offset = O
    else # index_gep_typ evenly divides the constant offset, so gep in units of it
offset_gep_typ = index_gep_typ
offset = O >> tz
end
push!(
instrs,
"%ptr.$(i) = inttoptr $(JULIAPOINTERTYPE) %0 to $(offset_gep_typ)*"
)
i += 1
push!(
instrs,
"%ptr.$(i) = getelementptr inbounds $(offset_gep_typ), $(offset_gep_typ)* %ptr.$(i-1), i32 $(offset)"
)
i += 1
if forgep && iszero(M) && (iszero(X) || isone(X))
push!(
instrs,
"%ptr.$(i) = ptrtoint $(offset_gep_typ)* %ptr.$(i-1) to $(JULIAPOINTERTYPE)"
)
i += 1
return instrs, i
elseif offset_gep_typ != index_gep_typ
push!(
instrs,
"%ptr.$(i) = bitcast $(offset_gep_typ)* %ptr.$(i-1) to $(index_gep_typ)*"
)
i += 1
end
end
# will do final type conversion
if ind_type === :Vec
if isone(M)
indname = indargname
else
indname = "indname"
constmul = llvmconst(W, "i$(ibits) $M")
push!(
instrs,
"%$(indname) = mul nsw <$W x i$(ibits)> %$(indargname), $(constmul)"
)
end
push!(
instrs,
"%ptr.$(i) = getelementptr inbounds $(index_gep_typ), $(index_gep_typ)* %ptr.$(i-1), <$W x i$(ibits)> %$(indname)"
)
i += 1
if forgep
push!(
instrs,
"%ptr.$(i) = ptrtoint <$W x $index_gep_typ*> %ptr.$(i-1) to <$W x $JULIAPOINTERTYPE>"
)
i += 1
elseif index_gep_typ != vtyp
push!(
instrs,
"%ptr.$(i) = bitcast <$W x $index_gep_typ*> %ptr.$(i-1) to <$W x $typ*>"
)
i += 1
end
return instrs, i
end
if ind_type === :Integer
if isbit
if (Morig & 7) == 0 || ispow2(Morig)
if abs(Morig) ≥ 8
M = Morig >> 3
if M != 1
indname = "indname"
push!(instrs, "%$(indname) = mul nsw i$(ibits) %$(indargname), $M")
else
indname = indargname
end
else
shifter = 3 - intlog2(Morig)
push!(instrs, "%shiftedind = ashr i$(ibits) %$(indargname), $shifter")
if Morig > 0
indname = "shiftedind"
else
indname = "indname"
push!(instrs, "%$(indname) = mul i$(ibits) %shiftedind, -1")
end
end
else
throw(
ArgumentError(
"Scale factors on bit accesses must be an integer multiple of 8 or an integer power of 2."
)
)
indname = "0"
end
else
if isone(M)
indname = indargname
elseif iszero(M)
indname = "0"
else
indname = "indname"
push!(instrs, "%$(indname) = mul nsw i$(ibits) %$(indargname), $M")
end
# TODO: if X != 1 and X != 0, check if it is better to gep -> gep, or broadcast -> add -> gep
end
push!(
instrs,
"%ptr.$(i) = getelementptr inbounds $(index_gep_typ), $(index_gep_typ)* %ptr.$(i-1), i$(ibits) %$(indname)"
)
i += 1
end
# ind_type === :Integer || ind_type === :StaticInt
if !(isone(X) | iszero(X)) # vec
# LLVM assumes integers are signed for indexing
# therefore, we need to set the integer size to be at least `nextpow2(intlog2(X*W-1)+2)` bits
# to avoid overflow
vibytes = max(min(4, rs ÷ W), nextpow2(intlog2(X * W - 1) + 2) >> 3)
vityp = "i$(8vibytes)"
vi = join((X * w for w ∈ 0:W-1), ", $vityp ")
if typ !== index_gep_typ
push!(
instrs,
"%ptr.$(i) = bitcast $(index_gep_typ)* %ptr.$(i-1) to $(typ)*"
)
i += 1
end
push!(
instrs,
"%ptr.$(i) = getelementptr inbounds $(typ), $(typ)* %ptr.$(i-1), <$W x $(vityp)> <$vityp $vi>"
)
i += 1
if forgep
push!(
instrs,
"%ptr.$(i) = ptrtoint <$W x $typ*> %ptr.$(i-1) to <$W x $JULIAPOINTERTYPE>"
)
i += 1
end
return instrs, i
end
if forgep # if forgep, just return now
push!(
instrs,
"%ptr.$(i) = ptrtoint $(index_gep_typ)* %ptr.$(i-1) to $JULIAPOINTERTYPE"
)
i += 1
elseif index_gep_typ != vtyp
push!(
instrs,
"%ptr.$(i) = bitcast $(index_gep_typ)* %ptr.$(i-1) to $(vtyp)*"
)
i += 1
end
instrs, i
end
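# For orientation, a hedged sketch of the instruction sequence built above for
# a scalar dynamic byte offset into a `Ptr{Float64}` (`ind_type === :Integer`,
# W = 1, X = M = 1, O = 0):
#
#   %ptr.0 = inttoptr i64 %0 to i8*
#   %ptr.1 = getelementptr inbounds i8, i8* %ptr.0, i64 %1
#   %ptr.2 = bitcast i8* %ptr.1 to double*
#
# i.e. a byte-granular `getelementptr` followed by a cast to the destination
# element type; when `M` carries more trailing zeros, the gep element type
# widens so that the scale folds into the address computation.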
gep_returns_vector(W::Int, X::Int, M::Int, ind_type::Symbol) =
(!isone(W) && ((ind_type === :Vec) || !(isone(X) | iszero(X))))
#::Type{T}, ::Type{I}, W::Int = 1, ivec::Bool = false, constmul::Int = 1) where {T <: NativeTypes, I <: Integer}
function gep_quote(
::Type{T},
ind_type::Symbol,
::Type{I},
W::Int,
X::Int,
M::Int,
O::Int,
rs::Int
) where {T,I}
T_sym = JULIA_TYPES[T]
I_sym = JULIA_TYPES[I]
gep_quote(T_sym, ind_type, I_sym, W, X, M, O, rs)
end
function gep_quote(
T_sym::Symbol,
ind_type::Symbol,
I_sym::Symbol,
W::Int,
X::Int,
M::Int,
O::Int,
rs::Int
)
sizeof_T = JULIA_TYPE_SIZE[T_sym]
sizeof_I = JULIA_TYPE_SIZE[I_sym]
if W > 1 && ind_type !== :Vec
X, Xr = divrem(X, sizeof_T)
iszero(Xr) || throw(
ArgumentError(
"sizeof($T_sym) == $sizeof_T, but stride between vector loads is given as $X, which is not a positive integer multiple."
)
)
end
if iszero(O) &&
(iszero(X) | isone(X)) &&
(iszero(M) || ind_type === :StaticInt)
return Expr(:block, Expr(:meta, :inline), :ptr)
end
ibits = 8sizeof_I
# ::Type{T}, ind_type::Symbol, indargname = '1', ibytes::Int, W::Int = 1, X::Int = 1, M::Int = 1, O::Int = 0, forgep::Bool = false
instrs, i = offset_ptr(T_sym, ind_type, '1', ibits, W, X, M, O, true, rs)
ret = Expr(:curly, :Ptr, T_sym)
lret = JULIAPOINTERTYPE
if gep_returns_vector(W, X, M, ind_type)
ret = Expr(:curly, :_Vec, W, ret)
lret = "<$W x $lret>"
end
args = Expr(:curly, :Tuple, Expr(:curly, :Ptr, T_sym))
largs = String[JULIAPOINTERTYPE]
arg_syms = Union{Symbol,Expr}[:ptr]
if !(iszero(M) || ind_type === :StaticInt)
push!(arg_syms, Expr(:call, :data, :i))
if ind_type === :Integer
push!(args.args, I_sym)
push!(largs, "i$(ibits)")
else
push!(args.args, Expr(:curly, :_Vec, W, I_sym))
push!(largs, "<$W x i$(ibits)>")
end
end
push!(instrs, "ret $lret %ptr.$(i-1)")
llvmcall_expr("", join(instrs, "\n"), ret, args, lret, largs, arg_syms)
end
@generated function _gep(
ptr::Ptr{T},
i::I,
::StaticInt{RS}
) where {I<:IntegerTypes,T<:NativeTypes,RS}
gep_quote(T, :Integer, I, 1, 1, 1, 0, RS)
end
@generated function _gep(
ptr::Ptr{T},
::StaticInt{N},
::StaticInt{RS}
) where {N,T<:NativeTypes,RS}
gep_quote(T, :StaticInt, Int, 1, 1, 0, N, RS)
end
@generated function _gep(
ptr::Ptr{T},
i::LazyMulAdd{M,O,I},
::StaticInt{RS}
) where {T<:NativeTypes,I<:IntegerTypes,O,M,RS}
gep_quote(T, :Integer, I, 1, 1, M, O, RS)
end
@inline _gep(ptr, i::IntegerIndex, ::StaticInt) = ptr + _materialize(i)
@generated function _gep(
ptr::Ptr{T},
i::Vec{W,I},
::StaticInt{RS}
) where {W,T<:NativeTypes,I<:IntegerTypes,RS}
gep_quote(T, :Vec, I, W, 1, 1, 0, RS)
end
@generated function _gep(
ptr::Ptr{T},
i::LazyMulAdd{M,O,Vec{W,I}},
::StaticInt{RS}
) where {W,T<:NativeTypes,I<:IntegerTypes,M,O,RS}
gep_quote(T, :Vec, I, W, 1, M, O, RS)
end
@inline gep(ptr::Ptr, i) = _gep(ptr, i, register_size())
@inline increment_ptr(ptr::AbstractStridedPointer) = pointer(ptr)
@inline function increment_ptr(ptr::AbstractStridedPointer, i::Tuple)
ioffset = _offset_index(i, offsets(ptr))
p, li = tdot(ptr, ioffset, static_strides(ptr))
_gep(p, li, Zero())
end
@inline increment_ptr(p::StridedBitPointer) = offsets(p)
@inline bmap(::F, ::Tuple{}, y::Tuple{}) where {F} = ()
@inline bmap(::F, ::Tuple{}, y::Tuple) where {F} = ()
@inline bmap(::F, x::Tuple{X,Vararg{Any,K}}, ::Tuple{}) where {F,X,K} = x
@inline bmap(
f::F,
x::Tuple{X,Vararg{Any,KX}},
y::Tuple{Y,Vararg{Any,KY}}
) where {F,X,Y,KX,KY} =
(f(first(x), first(y)), bmap(f, Base.tail(x), Base.tail(y))...)
@inline increment_ptr(p::StridedBitPointer, i::Tuple) =
bmap(vsub_nsw, offsets(p), i)
@inline increment_ptr(p::AbstractStridedPointer, o, i::Tuple) =
increment_ptr(reconstruct_ptr(p, o), i)
@inline function reconstruct_ptr(sp::AbstractStridedPointer, p::Ptr)
similar_with_offset(sp, p, offsets(sp))
end
# @inline function reconstruct_ptr(sp::AbstractStridedPointer, p::Ptr)
# similar_no_offset(sp, p)
# end
@inline function reconstruct_ptr(
ptr::StridedBitPointer{N,C,B,R},
offs::NTuple{N,Int}
) where {N,C,B,R}
stridedpointer(
pointer(ptr),
ArrayInterface.StrideIndex{N,R,C}(static_strides(ptr), offs),
StaticInt{B}()
)
end
@inline function gesp(
ptr::AbstractStridedPointer,
i::Tuple{Vararg{IntegerIndex}}
)
similar_no_offset(ptr, increment_ptr(ptr, i))
end
@inline function gesp(
ptr::StridedBitPointer{N,C,B,R},
i::Tuple{Vararg{IntegerIndex,K}}
) where {N,C,B,R,K}
stridedpointer(
pointer(ptr),
ArrayInterface.StrideIndex{N,R,C}(
static_strides(ptr),
increment_ptr(ptr, i)
),
StaticInt{B}()
)
end
@inline vsub_nsw(::NullStep, _) = Zero()
@inline vsub_nsw(::NullStep, ::LazyMulAdd) = Zero() # avoid ambiguity
@inline vsub_nsw(::NullStep, ::NullStep) = Zero()
@inline vsub_nsw(::NullStep, ::StaticInt) = Zero()
@inline select_null_offset(i::Tuple{}, off::Tuple{}) = ()
@inline select_null_offset(i::Tuple{I1,Vararg}, off::Tuple{O1}) where {I1,O1} =
(Zero(),)
@inline select_null_offset(
i::Tuple{NullStep,Vararg},
off::Tuple{O1}
) where {O1} = (first(off),)
@inline select_null_offset(
i::Tuple{I1,I2,Vararg},
off::Tuple{O1,O2,Vararg}
) where {I1,I2,O1,O2} =
(Zero(), select_null_offset(Base.tail(i), Base.tail(off))...)
@inline select_null_offset(
i::Tuple{NullStep,I2,Vararg},
off::Tuple{O1,O2,Vararg}
) where {I2,O1,O2} =
(first(off), select_null_offset(Base.tail(i), Base.tail(off))...)
@inline function gesp(
ptr::AbstractStridedPointer,
i::Tuple{Vararg{Union{NullStep,IntegerIndex}}}
)
ioffset = _offset_index(i, offsets(ptr))
offs = select_null_offset(i, offsets(ptr))
similar_with_offset(ptr, gep(zero_offsets(ptr), ioffset), offs)
end
@inline gesp(
ptr::AbstractStridedPointer,
i::Tuple{NullStep,Vararg{NullStep,N}}
) where {N} = ptr
@inline gesp(ptr::AbstractStridedPointer, i::Tuple{Vararg{Any,N}}) where {N} =
gesp(ptr, Tuple(CartesianVIndex(i)))#flatten
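# Hedged sketch of intent: `gesp(sp, i)` returns a strided pointer whose base
# has been advanced by the (offset-corrected) index tuple `i`, e.g.
#
#   sp2 = gesp(sp, (StaticInt(0), j))  # advance the base along axis 2 by `j`
#
# so that subsequent loads/stores through `sp2` index relative to the new base.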
function vload_quote(
::Type{T},
::Type{I},
ind_type::Symbol,
W::Int,
X::Int,
M::Int,
O::Int,
mask::Bool,
align::Bool,
rs::Int
) where {T<:NativeTypes,I<:Integer}
T_sym = JULIA_TYPES[T]
I_sym = JULIA_TYPES[I]
vload_quote(T_sym, I_sym, ind_type, W, X, M, O, mask, align, rs)
end
function vload_quote(
T_sym::Symbol,
I_sym::Symbol,
ind_type::Symbol,
W::Int,
X::Int,
M::Int,
O::Int,
mask::Bool,
align::Bool,
rs::Int
)
isbit = T_sym === :Bit
if !isbit && W > 1
sizeof_T = JULIA_TYPE_SIZE[T_sym]
if W * sizeof_T > rs
if ((T_sym === :Int64) | (T_sym === :UInt64)) && ((W * sizeof_T) == 2rs)
return vload_trunc_quote(
T_sym,
I_sym,
ind_type,
W,
X,
M,
O,
mask,
align,
rs,
:(_Vec{$W,$T_sym})
)
# else
# return vload_split_quote(W, sizeof_T, mask, align, rs, T_sym)
end
else
return vload_quote(
T_sym,
I_sym,
ind_type,
W,
X,
M,
O,
mask,
align,
rs,
:(_Vec{$W,$T_sym})
)
end
end
jtyp = isbit ? (isone(W) ? :Bool : mask_type_symbol(W)) : T_sym
vload_quote(T_sym, I_sym, ind_type, W, X, M, O, mask, align, rs, jtyp)
# jtyp_expr = Expr(:(.), :Base, QuoteNode(jtyp)) # reduce latency, hopefully
# vload_quote(T_sym, I_sym, ind_type, W, X, M, O, mask, align, rs, jtyp_expr)
end
function vload_trunc_quote(
T_sym::Symbol,
I_sym::Symbol,
ind_type::Symbol,
W::Int,
X::Int,
M::Int,
O::Int,
mask::Bool,
align::Bool,
rs::Int,
ret::Union{Symbol,Expr}
)
call = vload_quote_llvmcall(
T_sym,
I_sym,
ind_type,
W,
X,
M,
O,
mask,
align,
rs,
ret
)
call = Expr(:call, :%, call, Core.ifelse(T_sym === :Int64, :Int32, :UInt32))
Expr(:block, Expr(:meta, :inline), call)
end
# function vload_split_quote(W::Int, sizeof_T::Int, mask::Bool, align::Bool, rs::Int, T_sym::Symbol)
# D, r1 = divrem(W * sizeof_T, rs)
# Wnew, r2 = divrem(W, D)
# (iszero(r1) & iszero(r2)) || throw(ArgumentError("If loading more than a vector, Must load a multiple of the vector width."))
# q = Expr(:block,Expr(:meta,:inline))
# # ind_type = :StaticInt, :Integer, :Vec
# push!(q.args, :(isplit = splitvectortotuple(StaticInt{$D}(), StaticInt{$Wnew}(), i)))
# mask && push!(q.args, :(msplit = splitvectortotuple(StaticInt{$D}(), StaticInt{$Wnew}(), m)))
# t = Expr(:tuple)
# alignval = Expr(:call, align ? :True : :False)
# for d ∈ 1:D
# call = Expr(:call, :__vload, :ptr)
# push!(call.args, Expr(:ref, :isplit, d))
# mask && push!(call.args, Expr(:ref, :msplit, d))
# push!(call.args, alignval, Expr(:call, Expr(:curly, :StaticInt, rs)))
# v_d = Symbol(:v_, d)
# push!(q.args, Expr(:(=), v_d, call))
# push!(t.args, v_d)
# end
# push!(q.args, :(VecUnroll($t)::VecUnroll{$(D-1),$Wnew,$T_sym,Vec{$Wnew,$T_sym}}))
# q
# end
@inline function _mask_scalar_load(
ptr::Ptr{T},
i::IntegerIndex,
m::AbstractMask{1},
::A,
::StaticInt{RS}
) where {T,A,RS}
Bool(m) ? __vload(ptr, i, A(), StaticInt{RS}()) : zero(T)
end
@inline function _mask_scalar_load(
ptr::Ptr{T},
m::AbstractMask{1},
::A,
::StaticInt{RS}
) where {T,A,RS}
  Bool(m) ? __vload(ptr, A(), StaticInt{RS}()) : zero(T)
end
@inline function _mask_scalar_load(
ptr::Ptr{T},
i::IntegerIndex,
m::AbstractMask{W},
::A,
::StaticInt{RS}
) where {T,A,RS,W}
s = __vload(ptr, i, A(), StaticInt{RS}())
ifelse(
m,
_vbroadcast(StaticInt{W}(), s, StaticInt{RS}()),
_vzero(StaticInt{W}(), T, StaticInt{RS}())
)
end
@inline function _mask_scalar_load(
ptr::Ptr{T},
m::AbstractMask{W},
::A,
::StaticInt{RS}
) where {T,A,RS,W}
s = __vload(ptr, A(), StaticInt{RS}())
ifelse(
m,
_vbroadcast(StaticInt{W}(), s, StaticInt{RS}()),
_vzero(StaticInt{W}(), T, StaticInt{RS}())
)
end
function vload_quote_llvmcall(
T_sym::Symbol,
I_sym::Symbol,
ind_type::Symbol,
W::Int,
X::Int,
M::Int,
O::Int,
mask::Bool,
align::Bool,
rs::Int,
ret::Union{Symbol,Expr}
)
if mask && W == 1
if M == O == 0
return quote
$(Expr(:meta, :inline))
_mask_scalar_load(ptr, m, $(align ? :True : :False)(), StaticInt{$rs}())
end
else
return quote
$(Expr(:meta, :inline))
_mask_scalar_load(
ptr,
i,
m,
$(align ? :True : :False)(),
StaticInt{$rs}()
)
end
end
end
decl, instrs, args, lret, largs, arg_syms = vload_quote_llvmcall_core(
T_sym,
I_sym,
ind_type,
W,
X,
M,
O,
mask,
align,
rs
)
return llvmcall_expr(
decl,
instrs,
ret,
args,
lret,
largs,
arg_syms,
true,
true
)
end
function vload_quote_llvmcall_core(
T_sym::Symbol,
I_sym::Symbol,
ind_type::Symbol,
W::Int,
X::Int,
M::Int,
O::Int,
mask::Bool,
align::Bool,
rs::Int
)
sizeof_T = JULIA_TYPE_SIZE[T_sym]
reverse_load = ((W > 1) & (X == -sizeof_T)) & (ind_type !== :Vec)
if reverse_load
X = sizeof_T
O -= (W - 1) * sizeof_T
end
# if (X == -sizeof_T) & (!mask)
# return quote
# $(Expr(:meta,:inline))
# vload(ptr, i
# end
# end
sizeof_I = JULIA_TYPE_SIZE[I_sym]
ibits = 8sizeof_I
if W > 1 && ind_type !== :Vec
X, Xr = divrem(X, sizeof_T)
iszero(Xr) || throw(
ArgumentError(
"sizeof($T_sym) == $sizeof_T, but stride between vector loads is given as $X, which is not a positive integer multiple."
)
)
end
instrs, i = offset_ptr(T_sym, ind_type, '1', ibits, W, X, M, O, false, rs)
grv = gep_returns_vector(W, X, M, ind_type)
  # `BitArray`s store booleans as single bits in memory, so they must be handled specially
isbit = T_sym === :Bit
if isbit
    # @assert !grv "gathers are not supported with `BitArray`s."
mask = false # TODO: not this?
# typ = "i$W"
alignment = (align & (!grv)) ? cld(W, 8) : 1
typ = "i1"
else
alignment =
(align & (!grv)) ? _get_alignment(W, T_sym) : _get_alignment(0, T_sym)
typ = LLVM_TYPES_SYM[T_sym]
end
decl = LOAD_SCOPE_TBAA
dynamic_index = !(iszero(M) || ind_type === :StaticInt)
vtyp = vtype(W, typ)
if mask
if reverse_load
decl *= truncate_mask!(instrs, '1' + dynamic_index, W, 0, true) * "\n"
else
truncate_mask!(instrs, '1' + dynamic_index, W, 0, false)
end
end
if grv
loadinstr =
"$vtyp @llvm.masked.gather." *
suffix(W, T_sym) *
'.' *
ptr_suffix(W, T_sym)
decl *= "declare $loadinstr(<$W x $typ*>, i32, <$W x i1>, $vtyp)"
    m = mask ? "%mask.0" : llvmconst(W, "i1 1")
passthrough = mask ? "zeroinitializer" : "undef"
push!(
instrs,
"%res = call $loadinstr(<$W x $typ*> %ptr.$(i-1), i32 $alignment, <$W x i1> $m, $vtyp $passthrough)" *
LOAD_SCOPE_TBAA_FLAGS
)
elseif mask
suff = suffix(W, T_sym)
loadinstr = "$vtyp @llvm.masked.load." * suff * ".p0" * suff
decl *= "declare $loadinstr($vtyp*, i32, <$W x i1>, $vtyp)"
push!(
instrs,
"%res = call $loadinstr($vtyp* %ptr.$(i-1), i32 $alignment, <$W x i1> %mask.0, $vtyp zeroinitializer)" *
LOAD_SCOPE_TBAA_FLAGS
)
else
push!(
instrs,
"%res = load $vtyp, $vtyp* %ptr.$(i-1), align $alignment" *
LOAD_SCOPE_TBAA_FLAGS
)
end
if isbit
lret = string('i', max(8, nextpow2(W)))
if W > 1
if reverse_load
# isbit means mask is set to false, so we definitely need to declare `bitreverse`
bitreverse = "i$(W) @llvm.bitreverse.i$(W)(i$(W))"
decl *= "declare $bitreverse"
resbit = "resbitreverse"
push!(instrs, "%$(resbit) = call $bitreverse(i$(W) %res")
else
resbit = "res"
end
if W < 8
push!(instrs, "%resint = bitcast <$W x i1> %$(resbit) to i$(W)")
push!(instrs, "%resfinal = zext i$(W) %resint to i8")
elseif ispow2(W)
push!(instrs, "%resfinal = bitcast <$W x i1> %$(resbit) to i$(W)")
else
Wpow2 = nextpow2(W)
push!(instrs, "%resint = bitcast <$W x i1> %$(resbit) to i$(W)")
push!(instrs, "%resfinal = zext i$(W) %resint to i$(Wpow2)")
end
else
push!(instrs, "%resfinal = zext i1 %res to i8")
end
push!(instrs, "ret $lret %resfinal")
else
lret = vtyp
if reverse_load
reversemask = '<' * join(map(x -> string("i32 ", W - x), 1:W), ", ") * '>'
push!(
instrs,
"%resreversed = shufflevector $vtyp %res, $vtyp undef, <$W x i32> $reversemask"
)
push!(instrs, "ret $vtyp %resreversed")
else
push!(instrs, "ret $vtyp %res")
end
end
args = Expr(:curly, :Tuple, Expr(:curly, :Ptr, T_sym))
largs = String[JULIAPOINTERTYPE]
arg_syms = Union{Symbol,Expr}[:ptr]
if dynamic_index
push!(arg_syms, :(data(i)))
if ind_type === :Integer
push!(args.args, I_sym)
push!(largs, "i$(ibits)")
else
push!(args.args, :(_Vec{$W,$I_sym}))
push!(largs, "<$W x i$(ibits)>")
end
end
if mask
push!(arg_syms, :(data(m)))
push!(args.args, mask_type(nextpow2(W)))
push!(largs, "i$(max(8,nextpow2(W)))")
end
return decl, join(instrs, "\n"), args, lret, largs, arg_syms
end
function vload_quote(
T_sym::Symbol,
I_sym::Symbol,
ind_type::Symbol,
W::Int,
X::Int,
M::Int,
O::Int,
mask::Bool,
align::Bool,
rs::Int,
ret::Union{Symbol,Expr}
)
call = vload_quote_llvmcall(
T_sym,
I_sym,
ind_type,
W,
X,
M,
O,
mask,
align,
rs,
ret
)
if (W > 1) & (T_sym === :Bit)
call = Expr(:call, Expr(:curly, :Mask, W), call)
end
Expr(:block, Expr(:meta, :inline), call)
end
# vload_quote(T, ::Type{I}, ind_type::Symbol, W::Int, X, M, O, mask, align = false)
# ::Type{T}, ::Type{I}, ind_type::Symbol, W::Int, X::Int, M::Int, O::Int, mask::Bool, align::Bool, rs::Int
function index_summary(::Type{StaticInt{N}}) where {N}
#I, ind_type, W, X, M, O
Int, :StaticInt, 1, 1, 0, N
end
function index_summary(::Type{I}) where {I<:IntegerTypesHW}
#I, ind_type, W, X, M, O
I, :Integer, 1, 1, 1, 0
end
function index_summary(::Type{Vec{W,I}}) where {W,I<:IntegerTypes}
#I, ind_type, W, X, M, O
I, :Vec, W, 1, 1, 0
end
function index_summary(::Type{MM{W,X,I}}) where {W,X,I<:IntegerTypes}
#I, ind_type, W, X, M, O
IT, ind_type, _, __, M, O = index_summary(I)
# inherit from parent, replace `W` and `X`
IT, ind_type, W, X, M, O
end
function index_summary(
::Type{LazyMulAdd{LMAM,LMAO,LMAI}}
) where {LMAM,LMAO,LMAI}
I, ind_type, W, X, M, O = index_summary(LMAI)
I, ind_type, W, X * LMAM, M * LMAM, LMAO + O * LMAM
end
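# Hedged reference for the `(I, ind_type, W, X, M, O)` summaries above:
#
#   index_summary(Int)          # == (Int, :Integer,   1, 1, 1, 0)
#   index_summary(StaticInt{3}) # == (Int, :StaticInt, 1, 1, 0, 3)
#   index_summary(MM{4,2,Int})  # == (Int, :Integer,   4, 2, 1, 0)
#
# An `MM` inherits the summary of its base index, replacing only the vector
# width `W` and the stride `X`.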
# no index, no mask
@generated function __vload(
ptr::Ptr{T},
::A,
::StaticInt{RS}
) where {T<:NativeTypes,A<:StaticBool,RS}
vload_quote(T, Int, :StaticInt, 1, 1, 0, 0, false, A === True, RS)
end
# no index, mask
@generated function __vload(
ptr::Ptr{T},
::A,
m::AbstractMask,
::StaticInt{RS}
) where {T<:NativeTypesExceptFloat16,A<:StaticBool,RS}
vload_quote(T, Int, :StaticInt, 1, 1, 0, 0, true, A === True, RS)
end
# index, no mask
@generated function __vload(
ptr::Ptr{T},
i::I,
::A,
::StaticInt{RS}
) where {A<:StaticBool,T<:NativeTypes,I<:Index,RS}
IT, ind_type, W, X, M, O = index_summary(I)
vload_quote(T, IT, ind_type, W, X, M, O, false, A === True, RS)
end
# index, mask
@generated function __vload(
ptr::Ptr{T},
i::I,
m::AbstractMask,
::A,
::StaticInt{RS}
) where {A<:StaticBool,T<:NativeTypesExceptFloat16,I<:Index,RS}
IT, ind_type, W, X, M, O = index_summary(I)
vload_quote(T, IT, ind_type, W, X, M, O, true, A === True, RS)
end
# Float16 with mask
@inline function __vload(
ptr::Ptr{Float16},
::A,
m::AbstractMask,
::StaticInt{RS}
) where {A<:StaticBool,RS}
reinterpret(
Float16,
__vload(reinterpret(Ptr{Int16}, ptr), A(), m, StaticInt{RS}())
)
end
@inline function __vload(
ptr::Ptr{Float16},
i::I,
m::AbstractMask,
::A,
::StaticInt{RS}
) where {A<:StaticBool,I<:Index,RS}
reinterpret(
Float16,
__vload(reinterpret(Ptr{Int16}, ptr), i, m, A(), StaticInt{RS}())
)
end
@inline function _vload_scalar(
ptr::Ptr{Bit},
i::Union{Integer,StaticInt},
::A,
::StaticInt{RS}
) where {RS,A<:StaticBool}
d = i >> 3
r = i & 7
u = __vload(Base.unsafe_convert(Ptr{UInt8}, ptr), d, A(), StaticInt{RS}())
(u >> r) % Bool
end
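# Worked example of the bit addressing above: loading bit index i = 11 computes
# d = 11 >> 3 = 1 (the second byte) and r = 11 & 7 = 3, so the result is
# (byte >> 3) % Bool, the fourth bit of that byte.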
@inline function __vload(
ptr::Ptr{Bit},
i::IntegerTypesHW,
::A,
::StaticInt{RS}
) where {A<:StaticBool,RS}
_vload_scalar(ptr, i, A(), StaticInt{RS}())
end
# avoid ambiguities
@inline __vload(
ptr::Ptr{Bit},
::StaticInt{N},
::A,
::StaticInt{RS}
) where {N,A<:StaticBool,RS} =
_vload_scalar(ptr, StaticInt{N}(), A(), StaticInt{RS}())
# Entry points, `vload` and `vloada`
# No index, so forward straight to `__vload`
@inline vload(ptr::AbstractStridedPointer) =
__vload(ptr, False(), register_size())
@inline vloada(ptr::AbstractStridedPointer) =
__vload(ptr, True(), register_size())
# Index, so forward to `_vload` to linearize.
@inline vload(ptr::AbstractStridedPointer, i::Union{Tuple,Unroll}) =
_vload(ptr, i, False(), register_size())
@inline vloada(ptr::AbstractStridedPointer, i::Union{Tuple,Unroll}) =
_vload(ptr, i, True(), register_size())
@inline vload(
ptr::AbstractStridedPointer,
i::Union{Tuple,Unroll},
m::Union{AbstractMask,Bool,VecUnroll}
) = _vload(ptr, i, m, False(), register_size())
@inline vloada(
ptr::AbstractStridedPointer,
i::Union{Tuple,Unroll},
m::Union{AbstractMask,Bool,VecUnroll}
) = _vload(ptr, i, m, True(), register_size())
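# Usage sketch of the entry points (illustrative; in real code, accesses
# through a stridedpointer should be wrapped in `GC.@preserve`):
#   A  = rand(Float64, 16)
#   sp = stridedpointer(A)
#   v  = vload(sp, (MM{4}(1),))   # A[1:4] as a Vec{4,Float64}, unaligned
#   va = vloada(sp, (MM{4}(1),))  # same load, asserting register alignment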
@inline function __vload(
ptr::Ptr{T},
i::Number,
b::Bool,
::A,
::StaticInt{RS}
) where {T,A<:StaticBool,RS}
b ? __vload(ptr, i, A(), StaticInt{RS}()) : zero(T)
end
@inline vwidth_from_ind(i::Tuple) =
vwidth_from_ind(i, StaticInt(1), StaticInt(0))
@inline vwidth_from_ind(
i::Tuple{},
::StaticInt{W},
::StaticInt{U}
) where {W,U} = (StaticInt{W}(), StaticInt{U}())
@inline function vwidth_from_ind(
i::Tuple{<:AbstractSIMDVector{W},Vararg},
::Union{StaticInt{1},StaticInt{W}},
::StaticInt{U}
) where {W,U}
  vwidth_from_ind(Base.tail(i), StaticInt{W}(), StaticInt{U}())
end
@inline function vwidth_from_ind(
i::Tuple{<:VecUnroll{U,W},Vararg},
::Union{StaticInt{1},StaticInt{W}},
::Union{StaticInt{0},StaticInt{U}}
) where {U,W}
  vwidth_from_ind(Base.tail(i), StaticInt{W}(), StaticInt{U}())
end
@inline function _vload(
ptr::Ptr{T},
i::Tuple,
b::Bool,
::A,
::StaticInt{RS}
) where {T,A<:StaticBool,RS}
if b
_vload(ptr, i, A(), StaticInt{RS}())
else
zero_init(T, vwidth_from_ind(i), StaticInt{RS}())
end
end
@inline function _vload(
ptr::Ptr{T},
i::Unroll{AU,F,N,AV,W,M,X,I},
b::Bool,
::A,
::StaticInt{RS}
) where {T,AU,F,N,AV,W,M,X,I,A<:StaticBool,RS}
m = max_mask(Val{W}()) & b
_vload(ptr, i, m, A(), StaticInt{RS}())
end
@inline function __vload(
ptr::Ptr{T},
i::I,
m::Bool,
::A,
::StaticInt{RS}
) where {T,A<:StaticBool,RS,I<:IntegerIndex}
m ? __vload(ptr, i, A(), StaticInt{RS}()) : zero(T)
end
@inline function __vload(
ptr::Ptr{T},
i::I,
m::Bool,
::A,
::StaticInt{RS}
) where {T,A<:StaticBool,RS,W,I<:VectorIndex{W}}
_m = max_mask(Val{W}()) & m
__vload(ptr, i, _m, A(), StaticInt{RS}())
end
function vstore_quote(
::Type{T},
::Type{I},
ind_type::Symbol,
W::Int,
X::Int,
M::Int,
O::Int,
mask::Bool,
align::Bool,
noalias::Bool,
nontemporal::Bool,
rs::Int
) where {T<:NativeTypes,I<:Integer}
T_sym = JULIA_TYPES[T]
I_sym = JULIA_TYPES[I]
vstore_quote(
T_sym,
I_sym,
ind_type,
W,
X,
M,
O,
mask,
align,
noalias,
nontemporal,
rs
)
end
function vstore_quote(
T_sym::Symbol,
I_sym::Symbol,
ind_type::Symbol,
W::Int,
X::Int,
M::Int,
O::Int,
mask::Bool,
align::Bool,
noalias::Bool,
nontemporal::Bool,
rs::Int
)
sizeof_T = JULIA_TYPE_SIZE[T_sym]
sizeof_I = JULIA_TYPE_SIZE[I_sym]
ibits = 8sizeof_I
reverse_store =
(((W > 1) & (X == -sizeof_T)) & (ind_type !== :Vec)) & (T_sym ≢ :Bit) # TODO: check if `T_sym` can actually be `Bit`; don't we cast?
if reverse_store
X = sizeof_T
O -= (W - 1) * sizeof_T
end
if W > 1 && ind_type !== :Vec
X, Xr = divrem(X, sizeof_T)
iszero(Xr) || throw(
ArgumentError(
"sizeof($T_sym) == $sizeof_T, but stride between vector loads is given as $X, which is not a positive integer multiple."
)
)
end
instrs, i = offset_ptr(T_sym, ind_type, '2', ibits, W, X, M, O, false, rs)
grv = gep_returns_vector(W, X, M, ind_type)
  # align != nontemporal # should I do this?
alignment =
(align & (!grv)) ? _get_alignment(W, T_sym) : _get_alignment(0, T_sym)
decl = noalias ? SCOPE_METADATA * TBAA_STR : TBAA_STR
metadata = noalias ? STORE_SCOPE_FLAGS * TBAA_FLAGS : TBAA_FLAGS
dynamic_index = !(iszero(M) || ind_type === :StaticInt)
typ = LLVM_TYPES_SYM[T_sym]
lret = vtyp = vtype(W, typ)
if mask
if reverse_store
decl *= truncate_mask!(instrs, '2' + dynamic_index, W, 0, true) * "\n"
else
truncate_mask!(instrs, '2' + dynamic_index, W, 0, false)
end
end
if reverse_store
reversemask = '<' * join(map(x -> string("i32 ", W - x), 1:W), ", ") * '>'
push!(
instrs,
"%argreversed = shufflevector $vtyp %1, $vtyp undef, <$W x i32> $reversemask"
)
argtostore = "%argreversed"
else
argtostore = "%1"
end
if grv
storeinstr =
"void @llvm.masked.scatter." *
suffix(W, T_sym) *
'.' *
ptr_suffix(W, T_sym)
decl *= "declare $storeinstr($vtyp, <$W x $typ*>, i32, <$W x i1>)"
m = mask ? m = "%mask.0" : llvmconst(W, "i1 1")
push!(
instrs,
"call $storeinstr($vtyp $(argtostore), <$W x $typ*> %ptr.$(i-1), i32 $alignment, <$W x i1> $m)" *
metadata
)
# push!(instrs, "call $storeinstr($vtyp $(argtostore), <$W x $typ*> %ptr.$(i-1), i32 $alignment, <$W x i1> $m)")
elseif mask
suff = suffix(W, T_sym)
storeinstr = "void @llvm.masked.store." * suff * ".p0" * suff
decl *= "declare $storeinstr($vtyp, $vtyp*, i32, <$W x i1>)"
push!(
instrs,
"call $storeinstr($vtyp $(argtostore), $vtyp* %ptr.$(i-1), i32 $alignment, <$W x i1> %mask.0)" *
metadata
)
elseif nontemporal
push!(
instrs,
"store $vtyp $(argtostore), $vtyp* %ptr.$(i-1), align $alignment, !nontemporal !{i32 1}" *
metadata
)
else
push!(
instrs,
"store $vtyp $(argtostore), $vtyp* %ptr.$(i-1), align $alignment" *
metadata
)
end
push!(instrs, "ret void")
ret = :Cvoid
lret = "void"
ptrtyp = Expr(:curly, :Ptr, T_sym)
args = if W > 1
Expr(
:curly,
:Tuple,
ptrtyp,
Expr(:curly, :NTuple, W, Expr(:curly, :VecElement, T_sym))
)
else
Expr(:curly, :Tuple, ptrtyp, T_sym)
end
largs = String[JULIAPOINTERTYPE, vtyp]
arg_syms = Union{Symbol,Expr}[:ptr, Expr(:call, :data, :v)]
if dynamic_index
push!(arg_syms, :(data(i)))
if ind_type === :Integer
push!(args.args, I_sym)
push!(largs, "i$(ibits)")
else
push!(args.args, :(_Vec{$W,$I_sym}))
push!(largs, "<$W x i$(ibits)>")
end
end
if mask
push!(arg_syms, :(data(m)))
push!(args.args, mask_type(W))
push!(largs, "i$(max(8,nextpow2(W)))")
end
llvmcall_expr(
decl,
join(instrs, "\n"),
ret,
args,
lret,
largs,
arg_syms,
false,
true
)
end
# no index, no mask, scalar store
@generated function __vstore!(
ptr::Ptr{T},
v::VT,
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T<:NativeTypesExceptBit,
VT<:NativeTypes,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
if VT !== T
return Expr(
:block,
Expr(:meta, :inline),
:(__vstore!(ptr, convert($T, v), $A(), $S(), $NT(), StaticInt{$RS}()))
)
end
vstore_quote(
T,
Int,
:StaticInt,
1,
1,
0,
0,
false,
A === True,
S === True,
NT === True,
RS
)
end
# no index, no mask, vector store
@generated function __vstore!(
ptr::Ptr{T},
v::V,
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T<:NativeTypesExceptBit,
W,
VT<:NativeTypes,
V<:AbstractSIMDVector{W,VT},
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
if V !== Vec{W,T}
return Expr(
:block,
Expr(:meta, :inline),
:(__vstore!(
ptr,
convert(Vec{$W,$T}, v),
$A(),
$S(),
$NT(),
StaticInt{$RS}()
))
)
end
vstore_quote(
T,
Int,
:StaticInt,
W,
sizeof(T),
0,
0,
false,
A === True,
S === True,
NT === True,
RS
)
end
# index, no mask, scalar store
@generated function __vstore!(
ptr::Ptr{T},
v::VT,
i::I,
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T<:NativeTypesExceptBit,
VT<:NativeTypes,
I<:Index,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
IT, ind_type, W, X, M, O = index_summary(I)
if VT !== T || W > 1
if W > 1
return Expr(
:block,
Expr(:meta, :inline),
:(__vstore!(
ptr,
convert(Vec{$W,$T}, v),
i,
$A(),
$S(),
$NT(),
StaticInt{$RS}()
))
)
else
return Expr(
:block,
Expr(:meta, :inline),
:(__vstore!(
ptr,
convert($T, v),
i,
$A(),
$S(),
$NT(),
StaticInt{$RS}()
))
)
end
end
vstore_quote(
T,
IT,
ind_type,
W,
X,
M,
O,
false,
A === True,
S === True,
NT === True,
RS
)
end
# index, no mask, vector store
@generated function __vstore!(
ptr::Ptr{T},
v::V,
i::I,
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T<:NativeTypesExceptBit,
W,
VT<:NativeTypes,
V<:AbstractSIMDVector{W,VT},
I<:Index,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
if V !== Vec{W,T}
return Expr(
:block,
Expr(:meta, :inline),
:(__vstore!(
ptr,
convert(Vec{$W,$T}, v),
i,
$A(),
$S(),
$NT(),
StaticInt{$RS}()
))
)
end
IT, ind_type, _W, X, M, O = index_summary(I)
# don't want to require vector indices...
(W == _W || _W == 1) || throw(
ArgumentError(
"Vector width: $W, index width: $(_W). They must either be equal, or index width == 1."
)
)
if (W != _W) & (_W == 1)
X *= sizeof(T)
end
vstore_quote(
T,
IT,
ind_type,
W,
X,
M,
O,
false,
A === True,
S === True,
NT === True,
RS
)
end
# index, mask, scalar store
@generated function __vstore!(
ptr::Ptr{T},
v::VT,
i::I,
m::AbstractMask{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
W,
T<:NativeTypesExceptBit,
VT<:NativeTypes,
I<:Index,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
IT, ind_type, _W, X, M, O = index_summary(I)
(W == _W || _W == 1) || throw(
ArgumentError(
"Vector width: $W, index width: $(_W). They must either be equal, or index width == 1."
)
)
if W == 1
return Expr(
:block,
Expr(:meta, :inline),
:(
Bool(m) && __vstore!(
ptr,
convert($T, v),
data(i),
$A(),
$S(),
$NT(),
StaticInt{$RS}()
)
)
)
else
return Expr(
:block,
Expr(:meta, :inline),
:(__vstore!(
ptr,
convert(Vec{$W,$T}, v),
i,
m,
$A(),
$S(),
$NT(),
StaticInt{$RS}()
))
)
end
# vstore_quote(T, IT, ind_type, W, X, M, O, true, A===True, S===True, NT===True, RS)
end
# no index, mask, vector store
@generated function __vstore!(
ptr::Ptr{T},
v::V,
m::AbstractMask{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T<:NativeTypesExceptBit,
W,
VT<:NativeTypes,
V<:AbstractSIMDVector{W,VT},
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
if W == 1
return Expr(
:block,
Expr(:meta, :inline),
:(
Bool(m) && __vstore!(
ptr,
convert($T, v),
data(i),
$A(),
$S(),
$NT(),
StaticInt{$RS}()
)
)
)
elseif V !== Vec{W,T}
return Expr(
:block,
Expr(:meta, :inline),
:(__vstore!(
ptr,
convert(Vec{$W,$T}, v),
m,
$A(),
$S(),
$NT(),
StaticInt{$RS}()
))
)
end
vstore_quote(
T,
Int,
:StaticInt,
W,
sizeof(T),
0,
0,
true,
A === True,
S === True,
NT === True,
RS
)
end
# index, mask, vector store
@generated function __vstore!(
ptr::Ptr{T},
v::V,
i::I,
m::AbstractMask{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T<:NativeTypesExceptBit,
W,
VT<:NativeTypes,
V<:AbstractSIMDVector{W,VT},
I<:Index,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
if W == 1
return Expr(
:block,
Expr(:meta, :inline),
:(
Bool(m) && __vstore!(
ptr,
convert($T, v),
data(i),
$A(),
$S(),
$NT(),
StaticInt{$RS}()
)
)
)
elseif V !== Vec{W,T}
return Expr(
:block,
Expr(:meta, :inline),
:(__vstore!(
ptr,
convert(Vec{$W,$T}, v),
i,
m,
$A(),
$S(),
$NT(),
StaticInt{$RS}()
))
)
end
IT, ind_type, _W, X, M, O = index_summary(I)
(W == _W || _W == 1) || throw(
ArgumentError(
"Vector width: $W, index width: $(_W). They must either be equal, or index width == 1."
)
)
if (W != _W) & (_W == 1)
X *= sizeof(T)
end
vstore_quote(
T,
IT,
ind_type,
W,
X,
M,
O,
true,
A === True,
S === True,
NT === True,
RS
)
end
# no index, mask, vector store
@generated function __vstore!(
ptr::Ptr{Float16},
v::V,
m::AbstractMask{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
W,
V<:AbstractSIMDVector{W,Float16},
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
__vstore!(
reinterpret(Ptr{Int16}, ptr),
reinterpret(Int16, v),
m,
A(),
S(),
NT(),
StaticInt{RS}()
)
end
# index, mask, vector store
@inline function __vstore!(
ptr::Ptr{Float16},
v::V,
i::I,
m::AbstractMask{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
W,
V<:AbstractSIMDVector{W,Float16},
I<:Index,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
__vstore!(
reinterpret(Ptr{Int16}, ptr),
reinterpret(Int16, v),
i,
m,
A(),
S(),
NT(),
StaticInt{RS}()
)
end
# BitArray stores
@inline function __vstore!(
ptr::Ptr{Bit},
v::AbstractSIMDVector{W,B},
::A,
::S,
::NT,
::StaticInt{RS}
) where {B<:Union{Bit,Bool},W,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
__vstore!(
Base.unsafe_convert(Ptr{mask_type(StaticInt{W}())}, ptr),
tounsigned(v),
A(),
S(),
NT(),
StaticInt{RS}()
)
end
@inline bytetobit(x::Union{Vec,VecUnroll}) = x >> 3
@inline bytetobit(x::Union{MM,LazyMulAdd,Unroll}) = data(x) >> 3
@inline function __vstore!(
ptr::Ptr{Bit},
v::AbstractSIMDVector{W,B},
i::VectorIndex{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {B<:Union{Bit,Bool},W,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
__vstore!(
Base.unsafe_convert(Ptr{mask_type(StaticInt{W}())}, ptr),
tounsigned(v),
bytetobit(i),
A(),
S(),
NT(),
StaticInt{RS}()
)
end
@inline function __vstore!(
ptr::Ptr{Bit},
v::AbstractSIMDVector{W,B},
i::VectorIndex{W},
m::AbstractMask,
::A,
::S,
::NT,
::StaticInt{RS}
) where {B<:Union{Bit,Bool},W,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
ishift = data(i) >> 3
p = Base.unsafe_convert(Ptr{mask_type(StaticInt{W}())}, ptr)
u =
bitselect(data(m), __vload(p, ishift, A(), StaticInt{RS}()), tounsigned(v))
__vstore!(p, u, ishift, A(), S(), NT(), StaticInt{RS}())
end
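# The masked `Bit` store above is a read-modify-write: load the existing
# mask-sized chunk, blend in the new bits where `m` is set (`bitselect`),
# then store the merged word back.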
@inline function __vstore!(
f::F,
ptr::Ptr{Bit},
v::AbstractSIMDVector{W,B},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
B<:Union{Bit,Bool},
F<:Function,
W,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
__vstore!(
f,
Base.unsafe_convert(Ptr{mask_type(StaticInt{W}())}, ptr),
tounsigned(v),
A(),
S(),
NT(),
StaticInt{RS}()
)
end
@inline function __vstore!(
f::F,
ptr::Ptr{Bit},
v::AbstractSIMDVector{W,B},
i::VectorIndex{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
W,
B<:Union{Bit,Bool},
F<:Function,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
__vstore!(
f,
Base.unsafe_convert(Ptr{mask_type(StaticInt{W}())}, ptr),
tounsigned(v),
data(i) >> 3,
A(),
S(),
NT(),
StaticInt{RS}()
)
end
@inline function __vstore!(
f::F,
ptr::Ptr{Bit},
v::AbstractSIMDVector{W,B},
i::VectorIndex{W},
m::AbstractMask,
::A,
::S,
::NT,
::StaticInt{RS}
) where {
W,
B<:Union{Bit,Bool},
F<:Function,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
ishift = data(i) >> 3
p = Base.unsafe_convert(Ptr{mask_type(StaticInt{W}())}, ptr)
u =
bitselect(data(m), __vload(p, ishift, A(), StaticInt{RS}()), tounsigned(v))
__vstore!(f, p, u, ishift, A(), S(), NT(), StaticInt{RS}())
end
# `f` is applied to `v` before storing; when we have a vector index (below), `f` can be discarded
@inline function __vstore!(
f::F,
ptr::Ptr{T},
v::AbstractSIMDVector{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T<:NativeTypesExceptBit,
F<:Function,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
W
}
__vstore!(ptr, f(v), A(), S(), NT(), StaticInt{RS}())
end
@inline function __vstore!(
f::F,
ptr::Ptr{T},
v::AbstractSIMDVector{W},
i::IntegerIndex,
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T<:NativeTypesExceptBit,
F<:Function,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
W
}
__vstore!(ptr, f(v), i, A(), S(), NT(), StaticInt{RS}())
end
@inline function __vstore!(
f::F,
ptr::Ptr{T},
v::AbstractSIMDVector{W},
i::VectorIndex{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
W,
T<:NativeTypesExceptBit,
F<:Function,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
# __vstore!(ptr, convert(Vec{W,T}, v), i, A(), S(), NT(), StaticInt{RS}()) # discard `f`
__vstore!(ptr, v, i, A(), S(), NT(), StaticInt{RS}()) # discard `f`
end
@inline function __vstore!(
f::F,
ptr::Ptr{T},
v::AbstractSIMDVector{W},
i::VectorIndex{W},
m::AbstractMask{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
W,
T<:NativeTypesExceptBit,
F<:Function,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
# __vstore!(ptr, convert(Vec{W,T}, v), i, m, A(), S(), NT(), StaticInt{RS}())
__vstore!(ptr, f(v), i, m, A(), S(), NT(), StaticInt{RS}())
end
@inline function __vstore!(
f::F,
ptr::Ptr,
v::AbstractSIMDVector,
i::Index,
m::Bool,
::A,
::S,
::NT,
::StaticInt{RS}
) where {F<:Function,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
# __vstore!(ptr, convert(Vec{W,T}, v), i, m, A(), S(), NT(), StaticInt{RS}())
m && __vstore!(f, ptr, v, i, A(), S(), NT(), StaticInt{RS}())
end
@inline function __vstore!(
f::F,
ptr::Ptr{T},
v::NativeTypes,
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T<:NativeTypesExceptBit,
F<:Function,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
__vstore!(ptr, v, A(), S(), NT(), StaticInt{RS}())
end
@inline function __vstore!(
f::F,
ptr::Ptr{T},
v::NativeTypes,
i::IntegerIndex,
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T<:NativeTypesExceptBit,
F<:Function,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS
}
__vstore!(ptr, v, i, A(), S(), NT(), StaticInt{RS}())
end
for (store, align, alias, nontemporal) ∈ [
(:vstore!, False(), False(), False()),
(:vstorea!, True(), False(), False()),
(:vstorent!, True(), False(), True()),
(:vnoaliasstore!, False(), True(), False()),
(:vnoaliasstorea!, True(), True(), False()),
(:vnoaliasstorent!, True(), True(), True())
]
@eval begin
@inline function $store(ptr::AbstractStridedPointer, v)
__vstore!(ptr, v, $align, $alias, $nontemporal, register_size())
end
@inline function $store(
ptr::AbstractStridedPointer,
v,
i::Union{Tuple,Unroll}
)
_vstore!(ptr, v, i, $align, $alias, $nontemporal, register_size())
end
@inline function $store(
ptr::AbstractStridedPointer,
v,
i::Union{Tuple,Unroll},
m::Union{AbstractMask,VecUnroll}
)
_vstore!(ptr, v, i, m, $align, $alias, $nontemporal, register_size())
end
@inline function $store(
ptr::AbstractStridedPointer,
v,
i::Union{Tuple,Unroll},
b::Bool
)
b && _vstore!(ptr, v, i, $align, $alias, $nontemporal, register_size())
end
@inline function $store(
f::F,
ptr::AbstractStridedPointer,
v
) where {F<:Function}
__vstore!(f, ptr, v, $align, $alias, $nontemporal, register_size())
end
@inline function $store(
f::F,
ptr::AbstractStridedPointer,
v,
i::Union{Tuple,Unroll}
) where {F<:Function}
_vstore!(f, ptr, v, i, $align, $alias, $nontemporal, register_size())
end
@inline function $store(
f::F,
ptr::AbstractStridedPointer,
v,
i::Union{Tuple,Unroll},
m::Union{AbstractMask,VecUnroll}
) where {F<:Function}
_vstore!(f, ptr, v, i, m, $align, $alias, $nontemporal, register_size())
end
@inline function $store(
f::F,
ptr::AbstractStridedPointer,
v,
i::Union{Tuple,Unroll},
b::Bool
) where {F<:Function}
b && _vstore!(f, ptr, v, i, $align, $alias, $nontemporal, register_size())
end
end
end
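# The six generated entry points differ only in their static flags
# (align, noalias scope metadata, nontemporal):
#   vstore!           unaligned, may alias, temporal
#   vstorea!          aligned,   may alias, temporal
#   vstorent!         aligned,   may alias, nontemporal (streaming)
#   vnoaliasstore!    unaligned, noalias,   temporal
#   vnoaliasstorea!   aligned,   noalias,   temporal
#   vnoaliasstorent!  aligned,   noalias,   nontemporal
# Illustrative call: vstorea!(stridedpointer(A), v, (MM{4}(1),))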
@inline function __vstore!(
ptr::Ptr,
v,
i,
b::Bool,
::A,
::S,
::NT,
::StaticInt{RS}
) where {A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
b && __vstore!(ptr, v, i, A(), S(), NT(), StaticInt{RS}())
nothing
end
@generated function prefetch(ptr::Ptr{Cvoid}, ::Val{L}, ::Val{R}) where {L,R}
L ∈ (0, 1, 2, 3) || throw(
ArgumentError(
"Prefetch intrinsic requires a locality argument of 0, 1, 2, or 3, but received $L."
)
)
R ∈ (0, 1) || throw(
ArgumentError(
"Prefetch intrinsic requires a read/write argument of 0, 1, but received $R."
)
)
decl = "declare void @llvm.prefetch(i8*, i32, i32, i32)"
instrs = """
%addr = inttoptr $JULIAPOINTERTYPE %0 to i8*
call void @llvm.prefetch(i8* %addr, i32 $R, i32 $L, i32 1)
ret void
"""
llvmcall_expr(
decl,
instrs,
:Cvoid,
:(Tuple{Ptr{Cvoid}}),
"void",
[JULIAPOINTERTYPE],
[:ptr],
false,
true
)
end
@inline prefetch(ptr::Ptr{T}, ::Val{L}, ::Val{R}) where {T,L,R} =
prefetch(Base.unsafe_convert(Ptr{Cvoid}, ptr), Val{L}(), Val{R}())
@inline function prefetch(
ptr::Union{AbstractStridedPointer,Ptr},
i,
::Val{Locality},
::Val{ReadOrWrite}
) where {Locality,ReadOrWrite}
prefetch(gep(ptr, i), Val{Locality}(), Val{ReadOrWrite}())
end
@inline prefetch(ptr) = nothing
@inline prefetch(ptr::Ptr) =
prefetch(reinterpret(Ptr{Cvoid}, ptr), Val{3}(), Val{0}())
@inline prefetch(ptr::Ptr, ::Val{L}) where {L} =
prefetch(ptr, Val{L}(), Val{0}())
@inline prefetch(ptr::Ptr, i) = prefetch(ptr, i, Val{3}(), Val{0}())
@inline prefetch(ptr::Ptr, i, ::Val{L}) where {L} =
prefetch(ptr, i, Val{L}(), Val{0}())
@inline prefetch0(x, i) =
prefetch(gep(stridedpointer(x), (data(i),)), Val{3}(), Val{0}())
@inline prefetch0(x, I::Tuple) =
prefetch(gep(stridedpointer(x), data.(I)), Val{3}(), Val{0}())
@inline prefetch0(x, i, j) =
prefetch(gep(stridedpointer(x), (data(i), data(j))), Val{3}(), Val{0}())
# @inline prefetch0(x, i, j, oi, oj) = prefetch(gep(stridedpointer(x), (data(i) + data(oi) - 1, data(j) + data(oj) - 1)), Val{3}(), Val{0}())
@inline prefetch1(x, i) =
prefetch(gep(stridedpointer(x), (data(i),)), Val{2}(), Val{0}())
@inline prefetch1(x, i, j) =
prefetch(gep(stridedpointer(x), (data(i), data(j))), Val{2}(), Val{0}())
# @inline prefetch1(x, i, j, oi, oj) = prefetch(gep(stridedpointer(x), (data(i) + data(oi) - 1, data(j) + data(oj) - 1)), Val{2}(), Val{0}())
@inline prefetch2(x, i) =
prefetch(gep(stridedpointer(x), (data(i),)), Val{1}(), Val{0}())
@inline prefetch2(x, i, j) =
prefetch(gep(stridedpointer(x), (data(i), data(j))), Val{1}(), Val{0}())
# @inline prefetch2(x, i, j, oi, oj) = prefetch(gep(stridedpointer(x), (data(i) + data(oi) - 1, data(j) + data(oj) - 1)), Val{1}(), Val{0}())
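# Prefetch semantics (per llvm.prefetch): `L` is locality, from 0 (no
# temporal locality) through 3 (keep in all cache levels); `R` is 0 for an
# expected read, 1 for a write; the trailing `i32 1` selects the data (not
# instruction) cache. Illustrative:
#   prefetch(pointer(A), Val(3), Val(0))  # hint: A's first element will be read soon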
@generated function lifetime_start!(ptr::Ptr{T}, ::Val{L}) where {L,T}
ptyp = LLVM_TYPES[T]
decl = "declare void @llvm.lifetime.start(i64, $ptyp* nocapture)"
instrs = "%ptr = inttoptr $JULIAPOINTERTYPE %0 to $ptyp*\ncall void @llvm.lifetime.start(i64 $L, $ptyp* %ptr)\nret void"
llvmcall_expr(
decl,
instrs,
:Cvoid,
:(Tuple{Ptr{$T}}),
"void",
[JULIAPOINTERTYPE],
[:ptr],
false,
true
)
end
@generated function lifetime_end!(ptr::Ptr{T}, ::Val{L}) where {L,T}
ptyp = LLVM_TYPES[T]
decl = "declare void @llvm.lifetime.end(i64, $ptyp* nocapture)"
instrs = "%ptr = inttoptr $JULIAPOINTERTYPE %0 to $ptyp*\ncall void @llvm.lifetime.end(i64 $L, $ptyp* %ptr)\nret void"
llvmcall_expr(
decl,
instrs,
:Cvoid,
:(Tuple{Ptr{$T}}),
"void",
[JULIAPOINTERTYPE],
[:ptr],
false,
true
)
end
# @generated function lifetime_start!(ptr::Ptr{T}, ::Val{L}) where {L,T}
# decl = "declare void @llvm.lifetime.start(i64, i8* nocapture)"
# instrs = "%ptr = inttoptr $JULIAPOINTERTYPE %0 to i8*\ncall void @llvm.lifetime.start(i64 $(L*sizeof(T)), i8* %ptr)\nret void"
# llvmcall_expr(decl, instrs, :Cvoid, :(Tuple{Ptr{$T}}), "void", [JULIAPOINTERTYPE], [:ptr], false, true)
# end
# @generated function lifetime_end!(ptr::Ptr{T}, ::Val{L}) where {L,T}
# decl = "declare void @llvm.lifetime.end(i64, i8* nocapture)"
# instrs = "%ptr = inttoptr $JULIAPOINTERTYPE %0 to i8*\ncall void @llvm.lifetime.end(i64 $(L*sizeof(T)), i8* %ptr)\nret void"
# llvmcall_expr(decl, instrs, :Cvoid, :(Tuple{Ptr{$T}}), "void", [JULIAPOINTERTYPE], [:ptr], false, true)
# end
@inline lifetime_start!(ptr::Ptr) = lifetime_start!(ptr, Val{-1}())
@inline lifetime_end!(ptr::Ptr) = lifetime_end!(ptr, Val{-1}())
# Fallback is to do nothing. Intention is (e.g.) for PaddedMatrices/StackPointers.
@inline lifetime_start!(::Any) = nothing
@inline lifetime_end!(::Any) = nothing
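# Intended pattern (mirroring LLVM lifetime markers; illustrative):
#   lifetime_start!(p)  # buffer behind `p` becomes live
#   ...loads/stores through `p`...
#   lifetime_end!(p)    # optimizer may now discard the contents
# The default `Val{-1}()` emits `i64 -1`, i.e. an object of unknown size.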
@generated function compressstore!(
ptr::Ptr{T},
v::Vec{W,T},
mask::AbstractMask{W,U}
) where {W,T<:NativeTypes,U<:Unsigned}
typ = LLVM_TYPES[T]
vtyp = "<$W x $typ>"
mtyp_input = LLVM_TYPES[U]
mtyp_trunc = "i$W"
instrs = String["%ptr = inttoptr $JULIAPOINTERTYPE %1 to $typ*"]
truncate_mask!(instrs, '2', W, 0)
decl = "declare void @llvm.masked.compressstore.$(suffix(W,T))($vtyp, $typ*, <$W x i1>)"
push!(
instrs,
"call void @llvm.masked.compressstore.$(suffix(W,T))($vtyp %0, $typ* %ptr, <$W x i1> %mask.0)\nret void"
)
llvmcall_expr(
decl,
join(instrs, "\n"),
:Cvoid,
:(Tuple{_Vec{$W,$T},Ptr{$T},$U}),
"void",
[vtyp, JULIAPOINTERTYPE, "i$(8sizeof(U))"],
[:(data(v)), :ptr, :(data(mask))],
false,
true
)
end
@generated function expandload(
ptr::Ptr{T},
mask::AbstractMask{W,U}
) where {W,T<:NativeTypes,U<:Unsigned}
typ = LLVM_TYPES[T]
vtyp = "<$W x $typ>"
vptrtyp = "<$W x $typ*>"
mtyp_input = LLVM_TYPES[U]
mtyp_trunc = "i$W"
instrs = String[]
push!(instrs, "%ptr = inttoptr $JULIAPOINTERTYPE %0 to $typ*")
if mtyp_input == mtyp_trunc
push!(instrs, "%mask = bitcast $mtyp_input %1 to <$W x i1>")
else
push!(instrs, "%masktrunc = trunc $mtyp_input %1 to $mtyp_trunc")
push!(instrs, "%mask = bitcast $mtyp_trunc %masktrunc to <$W x i1>")
end
decl = "declare $vtyp @llvm.masked.expandload.$(suffix(W,T))($typ*, <$W x i1>, $vtyp)"
push!(
instrs,
"%res = call $vtyp @llvm.masked.expandload.$(suffix(W,T))($typ* %ptr, <$W x i1> %mask, $vtyp zeroinitializer)\nret $vtyp %res"
)
llvmcall_expr(
decl,
join(instrs, "\n"),
:(_Vec{$W,$T}),
:(Tuple{Ptr{$T},$U}),
vtyp,
[JULIAPOINTERTYPE, "i$(8sizeof(U))"],
[:ptr, :(data(mask))],
false,
true
)
end
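# Lane-level sketch of the pair above (LLVM masked.compressstore /
# masked.expandload; W = 4, mask bits are LSB-first so 0b1010 selects
# lanes 1 and 3):
#   compressstore!(p, Vec(a, b, c, d), Mask{4}(0b1010)) writes b, d
#   contiguously at p.
#   expandload(p, Mask{4}(0b1010)) over memory (x, y, ...) yields
#   Vec(0, x, 0, y) — inactive lanes come from the zero passthrough.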
# fallback definitions
@generated function __vload(
p::Ptr{T},
i::Index,
::A,
::StaticInt{RS}
) where {T,A,RS}
if Base.allocatedinline(T)
Expr(:block, Expr(:meta, :inline), :(unsafe_load(p + convert(Int, i))))
else
Expr(
:block,
Expr(:meta, :inline),
:(ccall(
:jl_value_ptr,
Ref{$T},
(Ptr{Cvoid},),
unsafe_load(Base.unsafe_convert(Ptr{Ptr{Cvoid}}, p) + convert(Int, i))
))
)
end
end
@generated function __vstore!(
p::Ptr{T},
v::T,
i::Index,
::A,
::S,
::NT,
::StaticInt{RS}
) where {T,A,S,NT,RS}
if Base.allocatedinline(T)
Expr(
:block,
Expr(:meta, :inline),
:(unsafe_store!(p + convert(Int, i), v); return nothing)
)
else
Expr(
:block,
Expr(:meta, :inline),
:(
unsafe_store!(
Base.unsafe_convert(Ptr{Ptr{Cvoid}}, p) + convert(Int, i),
Base.pointer_from_objref(v)
);
return nothing
)
)
end
end
# @inline function Base.getindex(A::AbstractArray{<:Number}, i::Unroll)
# sp, pres = stridedpointer_preserve(A)
# GC.@preserve pres begin
# x = vload(sp, i)
# end
# return x
# end
# @inline function Base.getindex(A::ArrayInterface.AbstractArray2{<:Number}, i::Unroll)
# sp, pres = stridedpointer_preserve(A)
# GC.@preserve pres begin
# x = vload(sp, i)
# end
# return x
# end
# @inline function Base.setindex!(A::AbstractArray{<:Number}, v, i::Unroll)
# sp, pres = stridedpointer_preserve(A)
# GC.@preserve pres begin
# vstore!(sp, v, i)
# end
# return v
# end
# @inline function Base.setindex!(A::ArrayInterface.AbstractArray2{<:Number}, v, i::Unroll)
# sp, pres = stridedpointer_preserve(A)
# GC.@preserve pres begin
# vstore!(sp, v, i)
# end
# return v
# end
# @inline function Base.getindex(A::AbstractArray{<:Number}, i::AbstractSIMD)
# sp, pres = stridedpointer_preserve(A)
# GC.@preserve pres begin
# x = vload(zero_offsets(sp), (vsub_nw(i,ArrayInterface.offset1(A)),))
# end
# return x
# end
# @inline function Base.getindex(A::ArrayInterface.AbstractArray2{<:Number}, i::AbstractSIMD)
# sp, pres = stridedpointer_preserve(A)
# GC.@preserve pres begin
# x = vload(zero_offsets(sp), (vsub_nw(i,ArrayInterface.offset1(A)),))
# end
# return x
# end
# @inline function Base.getindex(A::AbstractArray{<:Number}, i::Vararg{Union{Integer,AbstractSIMD},K}) where {K}
# sp, pres = stridedpointer_preserve(A)
# GC.@preserve pres begin
# x = vload(sp, i)
# end
# return x
# end
# @inline function Base.getindex(A::ArrayInterface.AbstractArray2{<:Number}, i::Vararg{Union{Integer,AbstractSIMD},K}) where {K}
# sp, pres = stridedpointer_preserve(A)
# GC.@preserve pres begin
# x = vload(sp, i)
# end
# return x
# end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 4630 |
@generated function addscalar(v::Vec{W,T}, s::T) where {W,T<:IntegerTypesHW}
typ = "i$(8sizeof(T))"
vtyp = "<$W x $typ>"
instrs = String[]
push!(instrs, "%ie = insertelement $vtyp zeroinitializer, $typ %1, i32 0")
push!(instrs, "%v = add $vtyp %0, %ie")
push!(instrs, "ret $vtyp %v")
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$(join(instrs, "\n")),
NTuple{$W,Core.VecElement{$T}},
Tuple{NTuple{$W,Core.VecElement{$T}},$T},
data(v),
s
)
)
end
end
@generated function addscalar(v::Vec{W,T}, s::T) where {W,T<:FloatingTypes}
typ = LLVM_TYPES[T]
vtyp = "<$W x $typ>"
instrs = String[]
push!(instrs, "%ie = insertelement $vtyp zeroinitializer, $typ %1, i32 0")
push!(instrs, "%v = fadd nsz arcp contract afn reassoc $vtyp %0, %ie")
push!(instrs, "ret $vtyp %v")
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$(join(instrs, "\n")),
NTuple{$W,Core.VecElement{$T}},
Tuple{NTuple{$W,Core.VecElement{$T}},$T},
data(v),
s
)
)
end
end
@generated function mulscalar(v::Vec{W,T}, s::T) where {W,T<:IntegerTypesHW}
typ = "i$(8sizeof(T))"
vtyp = "<$W x $typ>"
instrs = String[]
push!(
instrs,
"%ie = insertelement $vtyp $(llvmconst(W, T, 1)), $typ %1, i32 0"
)
push!(instrs, "%v = mul $vtyp %0, %ie")
push!(instrs, "ret $vtyp %v")
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$(join(instrs, "\n")),
NTuple{$W,Core.VecElement{$T}},
Tuple{NTuple{$W,Core.VecElement{$T}},$T},
data(v),
s
)
)
end
end
@generated function mulscalar(v::Vec{W,T}, s::T) where {W,T<:FloatingTypes}
typ = LLVM_TYPES[T]
vtyp = "<$W x $typ>"
instrs = String[]
push!(
instrs,
"%ie = insertelement $vtyp $(llvmconst(W, T, 1.0)), $typ %1, i32 0"
)
push!(instrs, "%v = fmul nsz arcp contract afn reassoc $vtyp %0, %ie")
push!(instrs, "ret $vtyp %v")
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$(join(instrs, "\n")),
NTuple{$W,Core.VecElement{$T}},
Tuple{NTuple{$W,Core.VecElement{$T}},$T},
data(v),
s
)
)
end
end
function scalar_maxmin(W::Int, @nospecialize(_::Type{T}), ismax::Bool) where {T}
if T <: Integer
typ = "i$(8sizeof(T))"
comp =
(T <: Signed) ? (ismax ? "icmp sgt" : "icmp slt") :
(ismax ? "icmp ugt" : "icmp ult")
basevalue = llvmconst(W, T, ismax ? typemin(T) : typemax(T))
else
opzero = ismax ? -Inf : Inf
comp = ismax ? "fcmp ogt" : "fcmp olt"
basevalue = llvmconst(W, T, repr(reinterpret(UInt64, opzero)))
if T === Float64
typ = "double"
# basevalue = llvmconst(W, T, repr(reinterpret(UInt64, opzero)))
elseif T === Float32
typ = "float"
# basevalue = llvmconst(W, T, repr(reinterpret(UInt32, Float32(opzero))))
# elseif T === Float16
# typ = "half"
# basevalue = llvmconst(W, T, repr(reinterpret(UInt16, Float16(opzero))))
else
throw("T === $T not currently supported.")
end
end
_scalar_maxmin(W, typ, comp, basevalue)
end
function _scalar_maxmin(W::Int, typ::String, comp::String, basevalue::String)
vtyp = "<$W x $typ>"
String[
"%ie = insertelement $vtyp $(basevalue), $typ %1, i32 0",
"%selection = $comp $vtyp %0, %ie",
"%v = select <$W x i1> %selection, $vtyp %0, $vtyp %ie",
"ret $vtyp %v"
]
end
@generated function maxscalar(v::Vec{W,T}, s::T) where {W,T<:NativeTypes}
instrs = scalar_maxmin(W, T, true)
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$(join(instrs, "\n")),
NTuple{$W,Core.VecElement{$T}},
Tuple{NTuple{$W,Core.VecElement{$T}},$T},
data(v),
s
)
)
end
end
@generated function minscalar(v::Vec{W,T}, s::T) where {W,T<:NativeTypes}
instrs = scalar_maxmin(W, T, false)
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$(join(instrs, "\n")),
NTuple{$W,Core.VecElement{$T}},
Tuple{NTuple{$W,Core.VecElement{$T}},$T},
data(v),
s
)
)
end
end
for (f, op) ∈ [
(:addscalar, :(+)),
(:mulscalar, :(*)),
(:maxscalar, :max),
(:minscalar, :min)
]
@eval begin
@inline $f(v::VecUnroll, s) = VecUnroll((
$f(first(getfield(v, :data)), s),
Base.tail(getfield(v, :data))...
))
@inline $f(v::Vec{W,T}, s::NativeTypes) where {W,T<:NativeTypes} =
$f(v, vconvert(T, s))
@inline $f(s::NativeTypes, v::AbstractSIMD{W,T}) where {W,T<:NativeTypes} =
$f(v, s)
@inline $f(a, b) = $op(a, b)
end
end
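# These combine a scalar into lane 0 only, leaving the other lanes at the
# operation's identity — useful for finishing reductions. Illustrative:
#   addscalar(Vec(1.0, 2.0, 3.0, 4.0), 10.0)  # lanes (11.0, 2.0, 3.0, 4.0)
#   mulscalar(Vec(1.0, 2.0, 3.0, 4.0), 10.0)  # lanes (10.0, 2.0, 3.0, 4.0)
#   maxscalar(Vec(1.0, 2.0, 3.0, 4.0), 2.5)   # lanes (2.5, 2.0, 3.0, 4.0)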
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 2551 |
function sub_quote(W::Int, T::Symbol, fast::Bool)::Expr
vtyp = vtype(W, T)
instrs = "%res = fneg $(fast_flags(fast)) $vtyp %0\nret $vtyp %res"
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($instrs, _Vec{$W,$T}, Tuple{_Vec{$W,$T}}, data(v)))
end
end
@generated vsub(v::Vec{W,T}) where {W,T<:Union{Float32,Float64}} =
sub_quote(W, JULIA_TYPES[T], false)
@generated vsub_fast(v::Vec{W,T}) where {W,T<:Union{Float32,Float64}} =
sub_quote(W, JULIA_TYPES[T], true)
@inline vsub(v) = -v
@inline vsub_fast(v) = Base.FastMath.sub_fast(v)
@inline vsub(v::Vec{<:Any,<:NativeTypes}) = vsub(zero(v), v)
@inline vsub_fast(v::Vec{<:Any,<:UnsignedHW}) = vsub(zero(v), v)
@inline vsub_fast(v::Vec{<:Any,<:NativeTypes}) = vsub_fast(zero(v), v)
@inline vsub(x::NativeTypes) = Base.FastMath.sub_fast(x)
@inline vsub_fast(x::NativeTypes) = Base.FastMath.sub_fast(x)
@inline vinv(v) = inv(v)
@inline vinv(v::AbstractSIMD{W,<:FloatingTypes}) where {W} = vfdiv(one(v), v)
@inline vinv(v::AbstractSIMD{W,<:IntegerTypesHW}) where {W} = inv(float(v))
@inline Base.FastMath.inv_fast(v::AbstractSIMD) =
Base.FastMath.div_fast(one(v), v)
@inline vabs(v) = abs(v)
@inline vabs(v::AbstractSIMD{W,<:Unsigned}) where {W} = v
@inline vabs(v::AbstractSIMD{W,<:Signed}) where {W} = ifelse(v > 0, v, -v)
@inline vround(v) = round(v)
@inline vround(v::AbstractSIMD{W,<:Union{Integer,StaticInt}}) where {W} = v
@inline vround(
v::AbstractSIMD{W,<:Union{Integer,StaticInt}},
::RoundingMode
) where {W} = v
function bswap_quote(W::Int, T::Symbol, st::Int)::Expr
typ = 'i' * string(8st)
suffix = 'v' * string(W) * typ
vtyp = "<$W x $typ>"
decl = "declare $(vtyp) @llvm.bswap.$(suffix)($(vtyp))"
instrs = """
%res = call $vtyp @llvm.bswap.$(suffix)($vtyp %0)
ret $vtyp %res
"""
ret_type = :(_Vec{$W,$T})
llvmcall_expr(
decl,
instrs,
ret_type,
:(Tuple{$ret_type}),
vtyp,
[vtyp],
[:(data(x))]
)
end
@generated Base.bswap(x::Vec{W,T}) where {T<:IntegerTypesHW,W} =
bswap_quote(W, JULIA_TYPES[T], sizeof(T))
@inline Base.bswap(x::VecUnroll{<:Any,<:Any,<:IntegerTypesHW}) =
VecUnroll(fmap(bswap, data(x)))
@inline Base.bswap(x::AbstractSIMDVector{<:Any,<:IntegerTypesHW}) =
bswap(Vec(x))
@inline Base.bswap(x::AbstractSIMD{<:Any,Float16}) =
reinterpret(Float16, bswap(reinterpret(UInt16, x)))
@inline Base.bswap(x::AbstractSIMD{<:Any,Float32}) =
reinterpret(Float32, bswap(reinterpret(UInt32, x)))
@inline Base.bswap(x::AbstractSIMD{<:Any,Float64}) =
reinterpret(Float64, bswap(reinterpret(UInt64, x)))
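# Illustrative: each lane is byte-swapped independently, e.g. a UInt32 lane
# 0x11223344 becomes 0x44332211; floating-point lanes round-trip through the
# same-width unsigned reinterpretation defined above.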
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 9921 |
@inline vzero(::Val{1}, ::Type{T}) where {T<:NativeTypes} = zero(T)
@inline vzero(::StaticInt{1}, ::Type{T}) where {T<:NativeTypes} = zero(T)
@inline _vzero(::StaticInt{W}, ::Type{Float16}, ::StaticInt{RS}) where {W,RS} =
_vzero_float16(StaticInt{W}(), StaticInt{RS}(), fast_half())
@inline _vzero_float16(::StaticInt{W}, ::StaticInt{RS}, ::False) where {W,RS} =
_vzero(StaticInt{W}(), Float32, StaticInt{RS}())
function _vzero_expr(W::Int, typ::String, T::Symbol, st::Int, RS::Int)
isone(W) && return Expr(:block, Expr(:meta, :inline), Expr(:call, :zero, T))
# if W * st > RS
# d, r1 = divrem(st * W, RS)
# Wnew, r2 = divrem(W, d)
# (iszero(r1) & iszero(r2)) || throw(ArgumentError("If broadcasting to greater than 1 vector length, should make it an integer multiple of the number of vectors."))
# t = Expr(:tuple)
# for i ∈ 1:d
# push!(t.args, :v)
# end
# # return Expr(:block, Expr(:meta,:inline), :(v = vzero(StaticInt{$Wnew}(), $T)), :(VecUnroll{$(d-1),$Wnew,$T,Vec{$Wnew,$T}}($t)))
# return Expr(:block, Expr(:meta,:inline), :(v = _vzero(StaticInt{$Wnew}(), $T, StaticInt{$RS}())), :(VecUnroll($t)))
# # return Expr(:block, Expr(:meta,:inline), :(v = _vzero(StaticInt{$Wnew}(), $T, StaticInt{$RS}())), :(VecUnroll($t)::VecUnroll{$(d-1),$Wnew,$T,Vec{$Wnew,$T}}))
# end
instrs = "ret <$W x $typ> zeroinitializer"
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($instrs, _Vec{$W,$T}, Tuple{}))
end
end
@generated _vzero_float16(::StaticInt{W}, ::StaticInt{RS}, ::True) where {W,RS} =
  _vzero_expr(W, "half", :Float16, 2, RS)
@generated _vzero(
::StaticInt{W},
::Type{T},
::StaticInt{RS}
) where {W,T<:NativeTypesExceptFloat16,RS} =
_vzero_expr(W, LLVM_TYPES[T], JULIA_TYPES[T], sizeof(T), RS)
function vundef_expr(W::Int, typ::String, T::Symbol)
if T === :Bit
W == 1 ? false : Mask(zero_mask(Val(W)))
elseif W == 1
instrs = "ret $typ undef"
quote
$(Expr(:meta, :inline))
$LLVMCALL($instrs, $T, Tuple{})
end
else
instrs = "ret <$W x $typ> undef"
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($instrs, _Vec{$W,$T}, Tuple{}))
end
end
end
@generated function _vundef(
::StaticInt{W},
::Type{T}
) where {W,T<:NativeTypesExceptFloat16}
vundef_expr(W, LLVM_TYPES[T], JULIA_TYPES[T])
end
@generated function _vundef(::StaticInt{W}, ::Type{Float16}) where {W}
_vundef_float16(StaticInt{W}(), fast_half())
end
@generated _vundef_float16(::StaticInt{W}, ::True) where {W} =
vundef_expr(W, "half", :Float16)
@generated _vundef_float16(::StaticInt{W}, ::False) where {W} =
vundef_expr(W, "float", :Float32)
@inline _vundef(::T) where {T<:NativeTypes} = _vundef(StaticInt{1}(), T)
@inline _vundef(::Vec{W,T}) where {W,T} = _vundef(StaticInt{W}(), T)
@generated _vundef(::VecUnroll{N,W,T}) where {N,W,T} = Expr(
:block,
Expr(:meta, :inline),
:(VecUnroll(
Base.Cartesian.@ntuple $(N + 1) n -> _vundef(StaticInt{$W}(), $T)
))
)
function vbroadcast_expr(W::Int, typ::String, T::Symbol, st::Int, RS::Int)
isone(W) && return :s
# if st * W > RS
# d, r1 = divrem(st * W, RS)
# Wnew, r2 = divrem(W, d)
# (iszero(r1) & iszero(r2)) || throw(ArgumentError("If broadcasting to greater than 1 vector length, should make it an integer multiple of the number of vectors."))
# t = Expr(:tuple)
# for i ∈ 1:d
# push!(t.args, :v)
# end
# return Expr(:block, Expr(:meta,:inline), :(v = _vbroadcast(StaticInt{$Wnew}(), s, StaticInt{$RS}())), :(VecUnroll($t)))
# end
vtyp = vtype(W, typ)
instrs = """
%ie = insertelement $vtyp undef, $typ %0, i32 0
%v = shufflevector $vtyp %ie, $vtyp undef, <$W x i32> zeroinitializer
ret $vtyp %v
"""
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($instrs, _Vec{$W,$T}, Tuple{$T}, s))
end
end
@inline _vbroadcast(::StaticInt{W}, s::Float16, ::StaticInt{RS}) where {W,RS} =
_vbroadcast_float16(StaticInt{W}(), s, StaticInt{RS}(), fast_half())
@inline _vbroadcast_float16(
::StaticInt{W},
s::Float16,
::StaticInt{RS},
::False
) where {W,RS} =
_vbroadcast(StaticInt{W}(), convert(Float32, s), StaticInt{RS}())
@generated _vbroadcast_float16(
::StaticInt{W},
s::Float16,
::StaticInt{RS},
::True
) where {W,RS} = vbroadcast_expr(W, "half", :Float16, 2, RS)
@inline function _vbroadcast(
::StaticInt{W},
s::Bool,
::StaticInt{RS}
) where {W,RS}
t = Mask(max_mask(StaticInt{W}()))
f = Mask(zero_mask(StaticInt{W}()))
Core.ifelse(s, t, f)
end
@generated function _vbroadcast(
::StaticInt{W},
s::_T,
::StaticInt{RS}
) where {W,_T<:NativeTypesExceptFloat16,RS}
if (_T <: Integer) && (sizeof(_T) * W > RS) && sizeof(_T) ≥ 8
intbytes = max(4, RS ÷ W)
T = integer_of_bytes(intbytes)
if _T <: Unsigned
T = unsigned(T)
end
# ssym = :(s % $T)
if T ≢ _T
return Expr(
:block,
Expr(:meta, :inline),
:(_vbroadcast(StaticInt{$W}(), convert($T, s), StaticInt{$RS}()))
)
end
end
vbroadcast_expr(W, LLVM_TYPES[_T], JULIA_TYPES[_T], sizeof(_T), RS)
end
@inline _vbroadcast(
::StaticInt{W},
m::EVLMask{W},
::StaticInt{RS}
) where {W,RS} = Mask(m)
@inline vzero(::Union{Val{W},StaticInt{W}}, ::Type{T}) where {W,T} =
_vzero(StaticInt{W}(), T, register_size(T))
@inline vbroadcast(::Union{Val{W},StaticInt{W}}, s::T) where {W,T} =
_vbroadcast(StaticInt{W}(), s, register_size(T))
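# Illustrative usage of the public wrappers:
#   vzero(Val(4), Float64)   # Vec{4,Float64} of zeros
#   vbroadcast(Val(4), 2.5)  # Vec{4,Float64} with every lane == 2.5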
@inline function _vbroadcast(
::StaticInt{W},
vu::VecUnroll{N,1,T,T},
::StaticInt{RS}
) where {W,N,T,RS}
VecUnroll(fmap(_vbroadcast, StaticInt{W}(), data(vu), StaticInt{RS}()))
end
@generated function vbroadcast(
::Union{Val{W},StaticInt{W}},
ptr::Ptr{T}
) where {W,T}
isone(W) && return Expr(:block, Expr(:meta, :inline), :(vload(ptr)))
typ = LLVM_TYPES[T]
ptyp = JULIAPOINTERTYPE
vtyp = "<$W x $typ>"
alignment = Base.datatype_alignment(T)
instrs = """
%ptr = inttoptr $ptyp %0 to $typ*
%res = load $typ, $typ* %ptr, align $alignment
%ie = insertelement $vtyp undef, $typ %res, i32 0
%v = shufflevector $vtyp %ie, $vtyp undef, <$W x i32> zeroinitializer
ret $vtyp %v
"""
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($instrs, _Vec{$W,$T}, Tuple{Ptr{$T}}, ptr))
end
end
@inline vbroadcast(
::Union{Val{W},StaticInt{W}},
v::AbstractSIMDVector{W}
) where {W} = v
@generated function vbroadcast(
::Union{Val{W},StaticInt{W}},
v::V
) where {W,L,T,V<:AbstractSIMDVector{L,T}}
N, r = divrem(L, W)
@assert iszero(r)
V = if T === Bit
:(Mask{$W,$(mask_type_symbol(W))})
else
:(Vec{$W,$T})
end
Expr(
:block,
Expr(:meta, :inline),
:(vconvert(VecUnroll{$(N - 1),$W,$T,$V}, v))
)
end
@inline Vec{W,T}(v::Vec{W,T}) where {W,T} = v
@inline Base.zero(::Type{Vec{W,T}}) where {W,T} =
_vzero(StaticInt{W}(), T, StaticInt{W}() * static_sizeof(T))
@inline Base.zero(::Vec{W,T}) where {W,T} = zero(Vec{W,T})
@inline Base.one(::Vec{W,T}) where {W,T} = vbroadcast(Val{W}(), one(T))
@inline Base.one(::Type{Vec{W,T}}) where {W,T} = vbroadcast(Val{W}(), one(T))
@inline Base.oneunit(::Type{Vec{W,T}}) where {W,T} =
vbroadcast(Val{W}(), one(T))
@inline vzero(::Type{T}) where {T<:Number} = zero(T)
@inline vzero() = vzero(pick_vector_width(Float64), Float64)
@inline Vec{W,T}(s::Real) where {W,T} = vbroadcast(Val{W}(), T(s))
@inline Vec{W}(s::T) where {W,T<:NativeTypes} = vbroadcast(Val{W}(), s)
@inline Vec(s::T) where {T<:NativeTypes} = vbroadcast(pick_vector_width(T), s)
@generated function _vzero(
::Type{VecUnroll{N,W,T,V}},
::StaticInt{RS}
) where {N,W,T,V,RS}
t = Expr(:tuple)
z = W == 1 ? :(zero($T)) : :(_vzero(StaticInt{$W}(), $T, StaticInt{$RS}()))
for _ ∈ 0:N
push!(t.args, z)
end
Expr(:block, Expr(:meta, :inline), :(VecUnroll($t)))
end
@inline Base.zero(::Type{VecUnroll{N,W,T,V}}) where {N,W,T,V} =
_vzero(VecUnroll{N,W,T,V}, register_size())
@inline Base.zero(::VecUnroll{N,W,T,V}) where {N,W,T,V} =
zero(VecUnroll{N,W,T,V})
@inline Base.one(::Type{VecUnroll{N,W,T,V}}) where {N,W,T,V} =
VecUnroll{N}(one(V))
@generated function VecUnroll{N,W,T,V}(
x::S
) where {N,W,T,V<:AbstractSIMDVector{W,T},S<:Real}
t = Expr(:tuple)
for n ∈ 0:N
push!(t.args, :(convert($V, x)))
end
Expr(:block, Expr(:meta, :inline), :(VecUnroll($t)))
end
@generated function VecUnroll{N,1,T,T}(x::S) where {N,T<:NativeTypes,S<:Real}
t = Expr(:tuple)
for n ∈ 0:N
push!(t.args, :(convert($T, x)))
end
Expr(:block, Expr(:meta, :inline), :(VecUnroll($t)))
end
@inline VecUnroll{N,W,T}(x::NativeTypesV) where {N,W,T} =
VecUnroll{N,W,T,Vec{W,T}}(x)
@inline VecUnroll{N}(x::V) where {N,W,T,V<:AbstractSIMDVector{W,T}} =
VecUnroll{N,W,T,V}(x)
@inline VecUnroll{N}(x::T) where {N,T<:NativeTypes} = VecUnroll{N,1,T,T}(x)
@generated function zero_vecunroll(
::StaticInt{N},
::StaticInt{W},
::Type{T},
::StaticInt{RS}
) where {N,W,T,RS}
Expr(
:block,
Expr(:meta, :inline),
:(_vzero(VecUnroll{$(N - 1),$W,$T,Vec{$W,$T}}, StaticInt{$RS}()))
)
end
@inline zero_init(
::Type{T},
::StaticInt{1},
::StaticInt{0},
::StaticInt{RS}
) where {T,RS} = zero(T)
@inline zero_init(
::Type{T},
::StaticInt{W},
::StaticInt{0},
::StaticInt{RS}
) where {W,T,RS} = _vzero(StaticInt{W}(), T, StaticInt{RS}())
@inline zero_init(
::Type{T},
::StaticInt{W},
::StaticInt{U},
::StaticInt{RS}
) where {W,U,T,RS} = _vzero(VecUnroll{U,W,T,Vec{W,T}}, StaticInt{RS}())
@inline zero_init(
::Type{T},
::Tuple{StaticInt{W},StaticInt{U}},
::StaticInt{RS}
) where {W,U,T,RS} = zero_init(T, StaticInt{W}(), StaticInt{U}(), StaticInt{RS}())
@generated function vbroadcast_vecunroll(
::StaticInt{N},
::StaticInt{W},
s::T,
::StaticInt{RS}
) where {N,W,T,RS}
q = Expr(
:block,
Expr(:meta, :inline),
:(v = _vbroadcast(StaticInt{$W}(), s, StaticInt{$RS}()))
)
t = Expr(:tuple)
for n ∈ 1:N
push!(t.args, :v)
end
push!(q.args, :(VecUnroll($t)))
q
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 14713 |
function shufflevector_instrs(
W::Int,
@nospecialize(T),
I::Vector{String},
W2::Int
)
W2 > W && throw(
ArgumentError(
"W for vector 1 must be at least W for vector two, but W₁ = $W < W₂ = $W2."
)
)
typ::String = (LLVM_TYPES[T])::String
vtyp1::String = "<$W x $typ>"
M::Int = length(I)
vtyp3::String = "<$M x i32>"
vtypr::String = "<$M x $typ>"
mask::String = '<' * join(I, ", ")::String * '>'
if ((W2 == 0) | (W2 == W))
v2 = W2 == 0 ? "undef" : "%1"
M,
"""
%res = shufflevector $vtyp1 %0, $vtyp1 $v2, $vtyp3 $mask
ret $vtypr %res
"""
else
vtyp0 = "<$W2 x $typ>"
maskpad =
'<' *
join(
map(w -> string("i32 ", w > W2 ? "undef" : string(w - 1)), 1:W),
", "
) *
'>'
M,
"""
%pad = shufflevector $vtyp0 %1, $vtyp0 undef, <$W x i32> $maskpad
%res = shufflevector $vtyp1 %0, $vtyp1 %pad, $vtyp3 $mask
ret $vtypr %res
"""
end
end
function tupletostringvector(@nospecialize(x::NTuple{N,Int})) where {N}
y = Vector{String}(undef, N)
@inbounds for n ∈ 1:N
y[n] = string("i32 ", x[n])
end
y
end
@generated function shufflevector(
v1::Vec{W,T},
v2::Vec{W2,T},
::Val{I}
) where {W,W2,T,I}
W ≥ W2 || throw(
ArgumentError(
"`v1` should be at least as long as `v2`, but `v1` is a `Vec{$W,$T}` and `v2` is a `Vec{$W2,$T}`."
)
)
M, instrs = shufflevector_instrs(W, T, tupletostringvector(I), W2)
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$instrs,
_Vec{$M,$T},
Tuple{_Vec{$W,$T},_Vec{$W2,$T}},
data(v1),
data(v2)
)
)
end
end
@inline shufflevector(x::T, y::T, ::Val{(0, 1)}) where {T<:NativeTypes} =
Vec(x, y)
@inline shufflevector(x::T, y::T, ::Val{(1, 0)}) where {T<:NativeTypes} =
Vec(y, x)
# @inline function shufflevector(v::Vec{W,T} x::T, ::Val{I}) where {I}
# end
@generated function shufflevector(v1::Vec{W,T}, ::Val{I}) where {W,T,I}
if length(I) == 1
return Expr(:block, Expr(:meta, :inline), :(extractelement(v1, $(only(I)))))
end
M, instrs = shufflevector_instrs(W, T, tupletostringvector(I), 0)
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($instrs, _Vec{$M,$T}, Tuple{_Vec{$W,$T}}, data(v1)))
end
end
@generated function vresize(
::Union{StaticInt{W},Val{W}},
v::Vec{L,T}
) where {W,L,T}
typ = LLVM_TYPES[T]
mask =
'<' *
join(map(x -> string("i32 ", x ≥ L ? "undef" : string(x)), 0:W-1), ", ") *
'>'
instrs = """
%res = shufflevector <$L x $typ> %0, <$L x $typ> undef, <$W x i32> $mask
ret <$W x $typ> %res
"""
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($instrs, _Vec{$W,$T}, Tuple{_Vec{$L,$T}}, data(v)))
end
end
@generated function vresize(
::Union{StaticInt{W},Val{W}},
v::T
) where {W,T<:NativeTypes}
typ = LLVM_TYPES[T]
vtyp = vtype(W, typ)
instrs = """
%ie = insertelement $vtyp undef, $typ %0, i32 0
ret $vtyp %ie
"""
quote
$(Expr(:meta, :inline))
Vec($LLVMCALL($instrs, _Vec{$W,$T}, Tuple{$T}, v))
end
end
@generated function shufflevector(i::MM{W,X}, ::Val{I}) where {W,X,I}
allincr = true
L = length(I)
for l ∈ 2:L
allincr &= (I[l] == I[l-1] + 1)
end
allincr || return Expr(
:block,
Expr(:meta, :inline),
:(shufflevector(Vec(i), Val{$I}()))
)
Expr(
:block,
Expr(:meta, :inline),
:(MM{$L,$X}(extractelement(i, $(first(I)))))
)
end
@generated function vcat(a::Vec{W1,T}, b::Vec{W2,T}) where {W1,W2,T}
W1 ≥ W2 || throw(
ArgumentError(
"`v1` should be at least as long as `v2`, but `v1` is a `Vec{$W1,$T}` and `v2` is a `Vec{$W2,$T}`."
)
)
mask = Vector{String}(undef, 2W1)
for w ∈ 0:W1+W2-1
mask[w+1] = string("i32 ", w)
end
for w ∈ W1+W2:2W1-1
mask[w+1] = "i32 undef"
end
M, instrs = shufflevector_instrs(W1, T, mask, W2)
quote
$(Expr(:meta, :inline))
Vec(
$LLVMCALL(
$instrs,
_Vec{$M,$T},
Tuple{_Vec{$W1,$T},_Vec{$W2,$T}},
data(a),
data(b)
)
)
end
end
@inline vcat(x::Base.HWReal, y::Base.HWReal) = Vec(x, y)
@inline vcat(
a::VecUnroll{N,W1,T,Vec{W1,T}},
b::VecUnroll{N,W2,T,Vec{W2,T}}
) where {N,W1,W2,T} = VecUnroll(fmap(vcat, data(a), data(b)))
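# Illustrative: vcat(Vec(1.0, 2.0), Vec(3.0, 4.0)) has lanes
# (1.0, 2.0, 3.0, 4.0). If the second vector is shorter, it is first padded
# with undef lanes, so the result always has 2·W1 lanes.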
@generated function hcat(
a::VecUnroll{N1,W,T,V},
b::VecUnroll{N2,W,T,V}
) where {N1,N2,W,T,V}
q = Expr(:block, Expr(:meta, :inline), :(da = data(a)), :(db = data(b)))
t = Expr(:tuple)
for (d, N) ∈ ((:da, N1), (:db, N2))
for n ∈ 1:N
push!(t.args, Expr(:call, :getfield, d, n, false))
end
end
push!(q.args, :(VecUnroll($t)))
q
end
function transpose_vecunroll_quote(W)
ispow2(W) || throw(
ArgumentError(
"Only supports powers of 2 for vector width and unrolling factor, but recieved $W = $W."
)
)
log2W = intlog2(W)
q = Expr(:block, Expr(:meta, :inline), :(vud = data(vu)))
N = W # N vectors of length W
vectors1 = [Symbol(:v_, n) for n ∈ 0:N-1]
vectors2 = [Symbol(:v_, n + N) for n ∈ 0:N-1]
# z = Expr(:call, Expr(:curly, Expr(:(.), :VectorizationBase, QuoteNode(:MM)), W), 0)
# for n ∈ 1:N
# push!(q.args, Expr(:(=), vectors1[n], Expr(:call, Expr(:(.), :VectorizationBase, QuoteNode(:vload)), :ptrA, Expr(:tuple, z, n-1))))
# end
for n ∈ 1:N
push!(
q.args,
Expr(:(=), vectors1[n], Expr(:call, :getfield, :vud, n, false))
)
end
Nhalf = N >>> 1
vecstride = 1
partition_stride = 2
for nsplits = 0:log2W-1
shuffle0 = transposeshuffle(nsplits, W, false)
shuffle1 = transposeshuffle(nsplits, W, true)
for partition ∈ 0:(W>>>(nsplits+1))-1
for _n1 ∈ 1:vecstride
n1 = partition * partition_stride + _n1
n2 = n1 + vecstride
v11 = vectors1[n1]
v12 = vectors1[n2]
v21 = vectors2[n1]
v22 = vectors2[n2]
shuff1 = Expr(:call, :shufflevector, v11, v12, shuffle0)
shuff2 = Expr(:call, :shufflevector, v11, v12, shuffle1)
push!(q.args, Expr(:(=), v21, shuff1))
push!(q.args, Expr(:(=), v22, shuff2))
end
end
vectors1, vectors2 = vectors2, vectors1
vecstride <<= 1
partition_stride <<= 1
# @show vecstride <<= 1
end
t = Expr(:tuple)
for n ∈ 1:N
push!(t.args, vectors1[n])
end
# for n ∈ 1:N
# push!(q.args, Expr(:(=), vectors1[n], Expr(:call, Expr(:(.), :VectorizationBase, QuoteNode(:vstore!)), :ptrB, vectors1[n], Expr(:tuple, z, n-1))))
# end
push!(q.args, Expr(:call, :VecUnroll, t))
q
end
function subset_tup(W, o)
t = Expr(:tuple)
for w ∈ o:W-1+o
push!(t.args, w)
end
Expr(:call, Expr(:curly, :Val, t))
end
function transpose_vecunroll_quote_W_larger(N, W)
(ispow2(W) & ispow2(N)) || throw(
ArgumentError(
"Only supports powers of 2 for vector width and unrolling factor, but recieved $N and $W."
)
)
log2W = intlog2(W)
log2N = intlog2(N)
q = Expr(:block, Expr(:meta, :inline), :(vud = data(vu)))
# N = W # N vectors of length W
vectors1 = [Symbol(:v_, n) for n ∈ 0:N-1]
vectors2 = [Symbol(:v_, n + N) for n ∈ 0:N-1]
# z = Expr(:call, Expr(:curly, Expr(:(.), :VectorizationBase, QuoteNode(:MM)), W), 0)
# for n ∈ 1:N
# push!(q.args, Expr(:(=), vectors1[n], Expr(:call, Expr(:(.), :VectorizationBase, QuoteNode(:vload)), :ptrA, Expr(:tuple, z, n-1))))
# end
for n ∈ 1:N
push!(
q.args,
Expr(:(=), vectors1[n], Expr(:call, :getfield, :vud, n, false))
)
end
Nhalf = N >>> 1
vecstride = 1
partition_stride = 2
for nsplits = 0:log2N-1
shuffle0 = transposeshuffle(nsplits, W, false)
shuffle1 = transposeshuffle(nsplits, W, true)
for partition ∈ 0:(N>>>(nsplits+1))-1
for _n1 ∈ 1:vecstride
n1 = partition * partition_stride + _n1
n2 = n1 + vecstride
v11 = vectors1[n1]
v12 = vectors1[n2]
v21 = vectors2[n1]
v22 = vectors2[n2]
shuff1 = Expr(:call, :shufflevector, v11, v12, shuffle0)
shuff2 = Expr(:call, :shufflevector, v11, v12, shuffle1)
push!(q.args, Expr(:(=), v21, shuff1))
push!(q.args, Expr(:(=), v22, shuff2))
end
end
vectors1, vectors2 = vectors2, vectors1
vecstride <<= 1
partition_stride <<= 1
# @show vecstride <<= 1
end
# @show vecstride, partition_stride
t = Expr(:tuple)
o = 0
for i ∈ 1:(1<<(log2W-log2N))
extract = subset_tup(N, o)
for n ∈ 1:N
push!(t.args, Expr(:call, :shufflevector, vectors1[n], extract))
end
o += N
end
# for n ∈ 1:N
# push!(q.args, Expr(:(=), vectors1[n], Expr(:call, Expr(:(.), :VectorizationBase, QuoteNode(:vstore!)), :ptrB, vectors1[n], Expr(:tuple, z, n-1))))
# end
push!(q.args, Expr(:call, :VecUnroll, t))
q
end
function transpose_vecunroll_quote_W_smaller(N, W)
(ispow2(W) & ispow2(N)) || throw(
ArgumentError(
"Only supports powers of 2 for vector width and unrolling factor, but recieved $N and $W."
)
)
N, W = W, N
log2W = intlog2(W)
log2N = intlog2(N)
q = Expr(:block, Expr(:meta, :inline), :(vud = data(vu)))
# N = W # N vectors of length W
vectors1 = [Symbol(:v_, n) for n ∈ 0:N-1]
vectors2 = [Symbol(:v_, n + N) for n ∈ 0:N-1]
# z = Expr(:call, Expr(:curly, Expr(:(.), :VectorizationBase, QuoteNode(:MM)), W), 0)
# for n ∈ 1:N
# push!(q.args, Expr(:(=), vectors1[n], Expr(:call, Expr(:(.), :VectorizationBase, QuoteNode(:vload)), :ptrA, Expr(:tuple, z, n-1))))
# end
vectors3 = [Symbol(:vpiece_, w) for w ∈ 0:W-1]
for w ∈ 1:W
push!(
q.args,
Expr(:(=), vectors3[w], Expr(:call, :getfield, :vud, w, false))
)
end
Wtemp = W
exprs = Vector{Expr}(undef, W >>> 1)
initstride = W >>> (log2W - log2N)
Ntemp = N
# Wtemp = W >>> 1
Wratio_init = W ÷ N
Wratio = Wratio_init
while Wratio > 1
Wratioh = Wratio >>> 1
for w ∈ 0:(Wratioh)-1
i = (2N) * w
j = i + N
for n ∈ 1:N
exprs[n+N*w] = if Wratio == Wratio_init
Expr(:call, :vcat, vectors3[i+n], vectors3[j+n])
else
Expr(:call, :vcat, exprs[i+n], exprs[j+n])
end
end
end
Wratio = Wratioh
end
for n ∈ 1:N
push!(q.args, Expr(:(=), vectors1[n], exprs[n]))
end
Nhalf = N >>> 1
vecstride = 1
partition_stride = 2
for nsplits = 0:log2N-1
shuffle0 = transposeshuffle(nsplits, W, false)
shuffle1 = transposeshuffle(nsplits, W, true)
for partition ∈ 0:(N>>>(nsplits+1))-1
for _n1 ∈ 1:vecstride
n1 = partition * partition_stride + _n1
n2 = n1 + vecstride
v11 = vectors1[n1]
v12 = vectors1[n2]
v21 = vectors2[n1]
v22 = vectors2[n2]
shuff1 = Expr(:call, :shufflevector, v11, v12, shuffle0)
shuff2 = Expr(:call, :shufflevector, v11, v12, shuffle1)
push!(q.args, Expr(:(=), v21, shuff1))
push!(q.args, Expr(:(=), v22, shuff2))
end
end
vectors1, vectors2 = vectors2, vectors1
vecstride <<= 1
partition_stride <<= 1
# @show vecstride <<= 1
end
# @show vecstride, partition_stride
t = Expr(:tuple)
for n ∈ 1:N
push!(t.args, vectors1[n])
end
push!(q.args, Expr(:call, :VecUnroll, t))
q
end
@generated function transpose_vecunroll(vu::VecUnroll{N,W}) where {N,W}
# N+1 == W || throw(ArgumentError("Transposing is currently only supported for sets of vectors of size equal to their length, but received $(N+1) vectors of length $W."))
# 1+2
if N + 1 == W
W == 1 && return :vu
transpose_vecunroll_quote(W)
elseif W == 1
v = Expr(:call, :Vec)
for n ∈ 0:N
push!(v.args, Expr(:call, GlobalRef(Core, :getfield), :vud, n + 1, false))
end
Expr(:block, Expr(:meta, :inline), :(vud = data(vu)), v)
elseif N + 1 < W
transpose_vecunroll_quote_W_larger(N + 1, W)
else# N+1 > W
transpose_vecunroll_quote_W_smaller(N + 1, W)
end
# code below lets LLVM do it.
# q = Expr(:block, Expr(:meta,:inline), :(vud = data(vu)))
# S = W
# syms = Vector{Symbol}(undef, W)
# gf = GlobalRef(Core, :getfield)
# for w ∈ 1:W
# syms[w] = v = Symbol(:v_, w)
# push!(q.args, Expr(:(=), v, Expr(:call, gf, :vud, w, false)))
# end
# while S > 1
# S >>>= 1
# for s ∈ 1:S
# v1 = syms[2s-1]
# v2 = syms[2s ]
# vc = Symbol(v1,:_,v2)
# push!(q.args, Expr(:(=), vc, Expr(:call, :vcat, v1, v2)))
# syms[s] = vc
# end
# end
# t = Expr(:tuple)
# v1 = syms[1];# v2 = syms[2]
# for w1 ∈ 0:N
# shufftup = Expr(:tuple)
# for w2 ∈ 0:N
# push!(shufftup.args, w2*W + w1)
# end
# push!(t.args, Expr(:call, :shufflevector, v1, Expr(:call, Expr(:curly, :Val, shufftup))))
# # push!(t.args, Expr(:call, :shufflevector, v1, v2, Expr(:call, Expr(:curly, :Val, shufftup))))
# end
# push!(q.args, Expr(:call, :VecUnroll, t))
# q
end
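# Illustrative 2×2 transpose (each inner Vec is one row):
#   transpose_vecunroll(VecUnroll((Vec(1.0, 2.0), Vec(3.0, 4.0))))
#   == VecUnroll((Vec(1.0, 3.0), Vec(2.0, 4.0)))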
@generated function vec_to_vecunroll(v::AbstractSIMDVector{W}) where {W}
t = Expr(:tuple)
for w ∈ 0:W-1
push!(t.args, :(extractelement(v, $w)))
end
Expr(:block, Expr(:meta, :inline), :(VecUnroll($t)))
end
@inline shufflevector(vxu::VecUnroll, ::Val{I}) where {I} =
VecUnroll(fmap(shufflevector, data(vxu), Val{I}()))
shuffleexpr(s::Expr) =
Expr(:block, Expr(:meta, :inline), :(shufflevector(vx, Val{$s}())))
"""
vpermilps177(vx::AbstractSIMD)
Vec(0, 1, 2, 3, 4, 5, 6, 7) ->
Vec(1, 0, 3, 2, 5, 4, 7, 6)
"""
@generated function vpermilps177(vx::AbstractSIMD{W}) where {W}
s = Expr(:tuple)
for w ∈ 1:2:W
push!(s.args, w, w - 1)
end
shuffleexpr(s)
end
"""
vmovsldup(vx::AbstractSIMD)
Vec(0, 1, 2, 3, 4, 5, 6, 7) ->
Vec(0, 0, 2, 2, 4, 4, 6, 6),
"""
@generated function vmovsldup(vx::AbstractSIMD{W}) where {W}
sl = Expr(:tuple)
for w ∈ 1:2:W
push!(sl.args, w - 1, w - 1)
end
shuffleexpr(sl)
end
"""
vmovshdup(vx::AbstractSIMD)
Vec(0, 1, 2, 3, 4, 5, 6, 7) ->
Vec(1, 1, 3, 3, 5, 5, 7, 7)
"""
@generated function vmovshdup(vx::AbstractSIMD{W}) where {W}
sh = Expr(:tuple)
for w ∈ 1:2:W
push!(sh.args, w, w)
end
shuffleexpr(sh)
end
@generated function uppervector(vx::AbstractSIMD{W}) where {W}
s = Expr(:tuple)
for i ∈ W>>>1:W-1
push!(s.args, i)
end
shuffleexpr(s)
end
@generated function lowervector(vx::AbstractSIMD{W}) where {W}
s = Expr(:tuple)
for i ∈ 0:(W>>>1)-1
push!(s.args, i)
end
shuffleexpr(s)
end
@inline splitvector(vx::AbstractSIMD) = lowervector(vx), uppervector(vx)
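# Illustrative: splitvector(Vec(1.0, 2.0, 3.0, 4.0)) ==
#   (Vec(1.0, 2.0), Vec(3.0, 4.0))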
@generated function extractupper(vx::AbstractSIMD{W}) where {W}
s = Expr(:tuple)
for i ∈ 0:(W>>>1)-1
push!(s.args, 2i)
end
shuffleexpr(s)
end
@generated function extractlower(vx::AbstractSIMD{W}) where {W}
s = Expr(:tuple)
for i ∈ 0:(W>>>1)-1
push!(s.args, 2i + 1)
end
shuffleexpr(s)
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 2757 |
@inline function vfmaddsub(
x::AbstractSIMD{W},
y::AbstractSIMD{W},
z::AbstractSIMD{W},
::False
) where {W}
muladd(x, y, ifelse(isodd(MM{W}(Zero())), z, -z))
end
@inline function vfmsubadd(
x::AbstractSIMD{W},
y::AbstractSIMD{W},
z::AbstractSIMD{W},
::False
) where {W}
muladd(x, y, ifelse(iseven(MM{W}(Zero())), z, -z))
end
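# Lane pattern (0-indexed; matches the x86 fmaddsub/fmsubadd instructions):
#   vfmaddsub: even lanes compute x*y - z, odd lanes x*y + z
#   vfmsubadd: even lanes compute x*y + z, odd lanes x*y - z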
function vfmaddsub_expr(W::Int, double::Bool, addsub::Bool, avx512::Bool)
@assert ispow2(W)
t = double ? 'd' : 's'
typ = double ? "double" : "float"
bits = double ? 64W : 32W
@assert bits ≤ (avx512 ? 512 : 256)
vtyp = "<$W x $typ>"
addsubstr = addsub ? "addsub" : "subadd"
if avx512 && bits > 256
m = addsub ? "mask" : "mask3"
op = "@llvm.x86.avx512.$m.vfm$(addsubstr).p$(t).$(bits)"
decl = "$op($vtyp, $vtyp, $vtyp, i$(W), i32)"
call = "$op($vtyp %0, $vtyp %1, $vtyp %2, i$(W) -1, i32 4)"
else
op = "@llvm.x86.fma.vfm$(addsubstr).p$(t)"
if bits == 256
op *= ".256"
end
decl = "$op($vtyp, $vtyp, $vtyp)"
call = "$op($vtyp %0, $vtyp %1, $vtyp %2)"
end
decl = "declare $vtyp " * decl
instrs = "%res = call $vtyp $call\n ret $vtyp %res"
jtyp = double ? :Float64 : :Float32
llvmcall_expr(
decl,
instrs,
:(_Vec{$W,$jtyp}),
:(Tuple{_Vec{$W,$jtyp},_Vec{$W,$jtyp},_Vec{$W,$jtyp}}),
vtyp,
[vtyp, vtyp, vtyp],
[:(data(x)), :(data(y)), :(data(z))]
)
end
@inline unwrapvecunroll(x::Vec) = x
@inline unwrapvecunroll(x::VecUnroll) = data(x)
@inline unwrapvecunroll(x::AbstractSIMD) = Vec(x)
for (f, b) ∈ [(:vfmaddsub, true), (:vfmsubadd, false)]
@eval begin
@generated function $f(
x::Vec{W,T},
y::Vec{W,T},
z::Vec{W,T},
::True,
::True
) where {W,T<:Union{Float32,Float64}}
vfmaddsub_expr(W, T === Float64, $b, true)
end
@generated function $f(
x::Vec{W,T},
y::Vec{W,T},
z::Vec{W,T},
::True,
::False
) where {W,T<:Union{Float32,Float64}}
vfmaddsub_expr(W, T === Float64, $b, false)
end
@inline $f(
x::AbstractSIMD{W,T},
y::AbstractSIMD{W,T},
z::AbstractSIMD{W,T}
) where {W,T<:Union{Float32,Float64}} =
$f(x, y, z, has_feature(Val(:x86_64_fma)))
@inline $f(
x::Vec{W,T},
y::Vec{W,T},
z::Vec{W,T},
::True
) where {W,T<:Union{Float32,Float64}} =
$f(x, y, z, True(), has_feature(Val(:x86_64_avx512f)))
@inline function $f(
x::AbstractSIMD{W,T},
y::AbstractSIMD{W,T},
z::AbstractSIMD{W,T},
::True
) where {W,T<:Union{Float32,Float64}}
VecUnroll(
fmap(
$f,
unwrapvecunroll(x),
unwrapvecunroll(y),
unwrapvecunroll(z),
True()
)
)
end
end
end
# The SLEEF.jl package is licensed under the MIT "Expat" License:
# > Copyright (c) 2016: Mustafa Mohamad and other contributors:
# >
# > https://github.com/musm/SLEEF.jl/graphs/contributors
# >
# > Permission is hereby granted, free of charge, to any person obtaining a copy
# > of this software and associated documentation files (the "Software"), to deal
# > in the Software without restriction, including without limitation the rights
# > to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# > copies of the Software, and to permit persons to whom the Software is
# > furnished to do so, subject to the following conditions:
# >
# > The above copyright notice and this permission notice shall be included in all
# > copies or substantial portions of the Software.
# >
# > THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# > IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# > FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# > AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# > LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# > OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# > SOFTWARE.
# >
# SLEEF.jl includes ported code from the following project
# - [SLEEF](https://github.com/shibatch/SLEEF) [public domain] Author Naoki Shibata
using Base.Math: IEEEFloat
for (op, f, ff) ∈ [
("fadd", :add_ieee, :(+)),
("fsub", :sub_ieee, :(-)),
("fmul", :mul_ieee, :(*)),
("fdiv", :fdiv_ieee, :(/)),
("frem", :rem_ieee, :(%))
]
@eval begin
@generated $f(
v1::Vec{W,T},
v2::Vec{W,T}
) where {W,T<:Union{Float32,Float64}} =
VectorizationBase.binary_op($op, W, T)
@inline $f(s1::T, s2::T) where {T<:Union{Float32,Float64}} = $ff(s1, s2)
@inline $f(args::Vararg{Any,K}) where {K} = $f(promote(args...)...)
@inline $f(a::VecUnroll, b::VecUnroll) = VecUnroll(
VectorizationBase.fmap(
$f,
VectorizationBase.data(a),
VectorizationBase.data(b)
)
)
end
end
@inline add_ieee(a, b, c) = add_ieee(add_ieee(a, b), c)
@inline add_ieee(a, b, c, d::Vararg{Any,K}) where {K} =
add_ieee(add_ieee(a, b), add_ieee(c, d...))
function sub_ieee!(ex)
ex isa Expr || return
if ex.head === :call
_f = ex.args[1]
if _f isa Symbol
f::Symbol = _f
if f === :(+)
ex.args[1] = :(VectorizationBase.add_ieee)
elseif f === :(-)
ex.args[1] = :(VectorizationBase.sub_ieee)
elseif f === :(*)
ex.args[1] = :(VectorizationBase.mul_ieee)
elseif f === :(/)
ex.args[1] = :(VectorizationBase.fdiv_ieee)
elseif f === :(%)
ex.args[1] = :(VectorizationBase.rem_ieee)
end
end
end
foreach(sub_ieee!, ex.args)
esc(ex)
end
macro ieee(ex)
sub_ieee!(ex)
end
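# Example (illustrative): `@ieee s = a * b + c` expands to
# `s = VectorizationBase.add_ieee(VectorizationBase.mul_ieee(a, b), c)`, pinning the
# arithmetic to strict IEEE semantics so that the compensated double-double kernels
# below cannot be re-associated or contracted into FMAs by fast-math flags.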
const vIEEEFloat = Union{
IEEEFloat,
Vec{<:Any,<:IEEEFloat},
VectorizationBase.VecUnroll{<:Any,<:Any,<:IEEEFloat}
}
struct Double{T<:vIEEEFloat} <: Number
hi::T
lo::T
end
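# A `Double(hi, lo)` represents the unevaluated sum hi + lo with |lo| much smaller
# than ulp(hi), giving roughly twice the precision of the base type. Illustrative
# example (not part of the API): Float64(0.1) overshoots 1/10 by about 5.55e-18, so
#   Double(0.1, -5.551115123125783e-18)
# agrees with the true 1/10 to roughly 30 significant decimal digits.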
@inline Double(x::T) where {T<:vIEEEFloat} = Double(x, zero(T))
@inline Double(x::Vec, y::Vec) = Double(Vec(data(x)), Vec(data(y)))
@inline Base.convert(
::Type{Double{V}},
v::Vec
) where {W,T,V<:AbstractSIMD{W,T}} = Double(convert(V, v), vzero(V))
@inline Base.convert(::Type{Double{V}}, v::V) where {V<:AbstractSIMD} =
Double(v, vzero(V))
# @inline Base.convert(::Type{Double{V}}, m::Mask) where {V} = m
# @inline Base.convert(::Type{Double{Mask{W,U}}}, m::Mask{W,U}) where {W,U} = m
@inline Base.convert(
::Type{Double{V}},
d::Double{T}
) where {W,T,V<:AbstractSIMD{W,T}} =
Double(vbroadcast(Val{W}(), d.hi), vbroadcast(Val{W}(), d.lo))
@inline Base.eltype(d::Double) = eltype(d.hi)
(::Type{T})(x::Double{T}) where {T<:vIEEEFloat} = x.hi + x.lo
Base.issubnormal(d::Double) = issubnormal(d.hi) | issubnormal(d.lo)
@inline Base.eltype(d::Double{T}) where {T<:IEEEFloat} = T
@inline Base.eltype(d::Double{S}) where {N,T,S<:Vec{N,T}} = T
# @inline ifelse(u::Bool, v1::Double, v2::Double) =
# Double(ifelse(u, v1.hi, v2.hi), ifelse(u, v1.lo, v2.lo))
@inline ifelse(u::Mask, v1::Double, v2::Double) =
Double(ifelse(u, v1.hi, v2.hi), ifelse(u, v1.lo, v2.lo))
@generated function ifelse(
m::VecUnroll{N,W,T},
v1::Double{V1},
v2::Double{V2}
) where {N,W,T,V1,V2}
q = Expr(
:block,
Expr(:meta, :inline),
:(md = data(m)),
:(v1h = v1.hi),
:(v2h = v2.hi),
:(v1l = v1.lo),
:(v2l = v2.lo)
)
if V1 <: VecUnroll
push!(q.args, :(v1hd = data(v1h)))
push!(q.args, :(v1ld = data(v1l)))
end
if V2 <: VecUnroll
push!(q.args, :(v2hd = data(v2h)))
push!(q.args, :(v2ld = data(v2l)))
end
th = Expr(:tuple)
tl = Expr(:tuple)
gf = GlobalRef(Core, :getfield)
for n ∈ 1:N+1
ifelseₕ = Expr(:call, :ifelse, Expr(:call, gf, :md, n, false))
ifelseₗ = Expr(:call, :ifelse, Expr(:call, gf, :md, n, false))
if V1 <: VecUnroll
push!(ifelseₕ.args, Expr(:call, gf, :v1hd, n, false))
push!(ifelseₗ.args, Expr(:call, gf, :v1ld, n, false))
else
push!(ifelseₕ.args, :v1h)
push!(ifelseₗ.args, :v1l)
end
if V2 <: VecUnroll
push!(ifelseₕ.args, Expr(:call, gf, :v2hd, n, false))
push!(ifelseₗ.args, Expr(:call, gf, :v2ld, n, false))
else
      push!(ifelseₕ.args, :v2h)
      push!(ifelseₗ.args, :v2l)
end
push!(th.args, ifelseₕ)
push!(tl.args, ifelseₗ)
end
push!(q.args, :(Double(VecUnroll($th), VecUnroll($tl))))
q
end
@inline trunclo(x::Float64) =
reinterpret(Float64, reinterpret(UInt64, x) & 0xffff_ffff_f800_0000) # clear lower 27 bits (leave upper 26 bits)
@inline trunclo(x::Float32) =
reinterpret(Float32, reinterpret(UInt32, x) & 0xffff_f000) # clear lowest 12 bits (leave upper 12 bits)
# @inline trunclo(x::VecProduct) = trunclo(Vec(data(x)))
@inline function trunclo(x::AbstractSIMD{N,Float64}) where {N}
reinterpret(
Vec{N,Float64},
reinterpret(Vec{N,UInt64}, x) &
convert(Vec{N,UInt64}, 0xffff_ffff_f800_0000)
) # clear lower 27 bits (leave upper 26 bits)
end
@inline function trunclo(x::AbstractSIMD{N,Float32}) where {N}
reinterpret(
Vec{N,Float32},
reinterpret(Vec{N,UInt32}, x) & convert(Vec{N,UInt32}, 0xffff_f000)
) # clear lowest 12 bits (leave upper 12 bits)
end
@inline function splitprec(x::vIEEEFloat)
hx = trunclo(x)
hx, x - hx
end
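# Dekker-style splitting: `trunclo` clears the low half of the significand, so
# `splitprec(x) = (hi, lo)` satisfies x == hi + lo exactly, with each part holding
# at most 26 (Float64) or 12 (Float32) significand bits. Products of such halves
# are exact in the working precision, which the non-FMA `dmul` paths below exploit.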
@inline function dnormalize(x::Double{T}) where {T}
r = x.hi + x.lo
Double(r, (x.hi - r) + x.lo)
end
@inline Base.flipsign(x::Double{<:vIEEEFloat}, y::vIEEEFloat) =
Double(flipsign(x.hi, y), flipsign(x.lo, y))
@inline scale(x::Double{<:vIEEEFloat}, s::vIEEEFloat) =
Double(s * x.hi, s * x.lo)
@inline Base.:(-)(x::Double{T}) where {T<:vIEEEFloat} = Double(-x.hi, -x.lo)
@inline function Base.:(<)(x::Double{<:vIEEEFloat}, y::Double{<:vIEEEFloat})
x.hi < y.hi
end
@inline Base.:(<)(x::Double{<:vIEEEFloat}, y::Union{Number,Vec}) = x.hi < y
@inline Base.:(<)(x::Union{Number,Vec}, y::Double{<:vIEEEFloat}) = x < y.hi
# quick-two-sum x+y
@inline function dadd(x::vIEEEFloat, y::vIEEEFloat) #WARNING |x| >= |y|
s = x + y
Double(s, ((x - s) + y))
end
@inline function dadd(x::vIEEEFloat, y::Double{<:vIEEEFloat}) #WARNING |x| >= |y|
s = x + y.hi
Double(s, (((x - s) + y.hi) + y.lo))
end
@inline function dadd(x::Double{<:vIEEEFloat}, y::vIEEEFloat) #WARNING |x| >= |y|
s = x.hi + y
Double(s, (((x.hi - s) + y) + x.lo))
end
@inline function dadd(x::Double{<:vIEEEFloat}, y::Double{<:vIEEEFloat}) #WARNING |x| >= |y|
s = x.hi + y.hi
Double(s, ((((x.hi - s) + y.hi) + y.lo) + x.lo))
end
@inline function dsub(x::Double{<:vIEEEFloat}, y::Double{<:vIEEEFloat}) #WARNING |x| >= |y|
s = x.hi - y.hi
Double(s, ((((x.hi - s) - y.hi) - y.lo) + x.lo))
end
@inline function dsub(x::Double{<:vIEEEFloat}, y::vIEEEFloat) #WARNING |x| >= |y|
s = x.hi - y
Double(s, (((x.hi - s) - y) + x.lo))
end
@inline function dsub(x::vIEEEFloat, y::Double{<:vIEEEFloat}) #WARNING |x| >= |y|
s = x - y.hi
Double(s, (((x - s) - y.hi - y.lo)))
end
@inline function dsub(x::vIEEEFloat, y::vIEEEFloat) #WARNING |x| >= |y|
s = x - y
Double(s, ((x - s) - y))
end
# two-sum x+y NO BRANCH
@inline function dadd2(x::vIEEEFloat, y::vIEEEFloat)
s = x + y
v = s - x
Double(s, ((x - (s - v)) + (y - v)))
end
@inline function dadd2(x::vIEEEFloat, y::Double{<:vIEEEFloat})
s = x + y.hi
v = s - x
Double(s, (x - (s - v)) + (y.hi - v) + y.lo)
end
@inline dadd2(x::Double{<:vIEEEFloat}, y::vIEEEFloat) = dadd2(y, x)
@inline function dadd2(x::Double{<:vIEEEFloat}, y::Double{<:vIEEEFloat})
s = (x.hi + y.hi)
v = (s - x.hi)
smv = (s - v)
yhimv = (y.hi - v)
Double(s, ((((x.hi - smv) + yhimv) + x.lo) + y.lo))
end
@inline function dsub2(x::vIEEEFloat, y::vIEEEFloat)
s = x - y
v = s - x
Double(s, ((x - (s - v)) - (y + v)))
end
@inline function dsub2(x::vIEEEFloat, y::Double{<:vIEEEFloat})
s = (x - y.hi)
v = (s - x)
Double(s, (((x - (s - v)) - (y.hi + v)) - y.lo))
end
@inline function dsub2(x::Double{<:vIEEEFloat}, y::vIEEEFloat)
s = x.hi - y
v = s - x.hi
Double(s, (((x.hi - (s - v)) - (y + v)) + x.lo))
end
@inline function dsub2(x::Double{<:vIEEEFloat}, y::Double{<:vIEEEFloat})
s = x.hi - y.hi
v = s - x.hi
Double(s, ((((x.hi - (s - v)) - (y.hi + v)) + x.lo) - y.lo))
end
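# The branch-free two-sum returns the exact rounding error of s = fl(x + y), so
# s + lo == x + y holds exactly. Worked example (illustrative):
#   dadd2(1.0, 1e-17) == Double(1.0, 1e-17)
# The naive sum 1.0 + 1e-17 == 1.0 discards the small term, but the compensation
# (x - (s - v)) + (y - v) recovers it in the low word.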
@inline function ifelse(
b::Mask{N},
x::Double{T1},
y::Double{T2}
) where {
N,
T<:Union{Float32,Float64},
T1<:Union{T,Vec{N,T}},
T2<:Union{T,Vec{N,T}}
}
V = Vec{N,T}
Double(ifelse(b, V(x.hi), V(y.hi)), ifelse(b, V(x.lo), V(y.lo)))
end
# two-prod-fma
@inline function dmul(x::vIEEEFloat, y::vIEEEFloat, ::True)
z = (x * y)
Double(z, vfmsub(x, y, z))
end
@inline function dmul(x::vIEEEFloat, y::vIEEEFloat, ::False)
hx, lx = splitprec(x)
hy, ly = splitprec(y)
@ieee begin
z = x * y
Double(z, (((hx * hy - z) + lx * hy + hx * ly) + lx * ly))
end
end
@inline function dmul(x::Double{<:vIEEEFloat}, y::vIEEEFloat, ::True)
z = (x.hi * y)
Double(z, vfmsub(x.hi, y, z) + x.lo * y)
end
@inline function dmul(x::Double{<:vIEEEFloat}, y::vIEEEFloat, ::False)
hx, lx = splitprec(x.hi)
hy, ly = splitprec(y)
@ieee begin
z = x.hi * y
Double(z, (hx * hy - z) + lx * hy + hx * ly + lx * ly + x.lo * y)
end
end
@inline function dmul(x::Double{<:vIEEEFloat}, y::Double{<:vIEEEFloat}, ::True)
z = x.hi * y.hi
Double(z, vfmsub(x.hi, y.hi, z) + x.hi * y.lo + x.lo * y.hi)
end
@inline function dmul(x::Double{<:vIEEEFloat}, y::Double{<:vIEEEFloat}, ::False)
hx, lx = splitprec(x.hi)
hy, ly = splitprec(y.hi)
@ieee begin
z = x.hi * y.hi
Double(
z,
(((hx * hy - z) + lx * hy + hx * ly) + lx * ly) +
x.hi * y.lo +
x.lo * y.hi
)
end
end
@inline dmul(x::vIEEEFloat, y::Double{<:vIEEEFloat}) = dmul(y, x)
@inline dmul(x, y) = dmul(x, y, fma_fast())
# x^2
@inline function dsqu(x::T, ::True) where {T<:vIEEEFloat}
z = x * x
Double(z, vfmsub(x, x, z))
end
@inline function dsqu(x::T, ::False) where {T<:vIEEEFloat}
hx, lx = splitprec(x)
@ieee begin
z = x * x
Double(z, (hx * hx - z) + lx * (hx + hx) + lx * lx)
end
end
@inline function dsqu(x::Double{T}, ::True) where {T<:vIEEEFloat}
z = x.hi * x.hi
Double(z, vfmsub(x.hi, x.hi, z) + (x.hi * (x.lo + x.lo)))
end
@inline function dsqu(x::Double{T}, ::False) where {T<:vIEEEFloat}
hx, lx = splitprec(x.hi)
@ieee begin
z = x.hi * x.hi
Double(z, (hx * hx - z) + lx * (hx + hx) + lx * lx + x.hi * (x.lo + x.lo))
end
end
@inline dsqu(x) = dsqu(x, fma_fast())
# sqrt(x)
@inline function dsqrt(x::Double{T}, ::True) where {T<:vIEEEFloat}
zhi = @fastmath sqrt(x.hi)
Double(zhi, (x.lo + vfnmadd(zhi, zhi, x.hi)) / (zhi + zhi))
end
@inline function dsqrt(x::Double{T}, ::False) where {T<:vIEEEFloat}
c = @fastmath sqrt(x.hi)
u = dsqu(c, False())
@ieee Double(c, (x.hi - u.hi - u.lo + x.lo) / (c + c))
end
@inline dsqrt(x) = dsqrt(x, fma_fast())
# x/y
@inline function ddiv(x::Double{<:vIEEEFloat}, y::Double{<:vIEEEFloat}, ::True)
invy = inv(y.hi)
zhi = (x.hi * invy)
Double(zhi, ((vfnmadd(zhi, y.hi, x.hi) + vfnmadd(zhi, y.lo, x.lo)) * invy))
end
@inline function ddiv(x::Double{<:vIEEEFloat}, y::Double{<:vIEEEFloat}, ::False)
@ieee begin
invy = one(y.hi) / y.hi
c = x.hi * invy
u = dmul(c, y.hi, False())
Double(c, ((((x.hi - u.hi) - u.lo) + x.lo) - c * y.lo) * invy)
end
end
@inline function ddiv(x::vIEEEFloat, y::vIEEEFloat, ::True)
ry = inv(y)
r = (x * ry)
Double(r, (vfnmadd(r, y, x) * ry))
end
@inline function ddiv(x::vIEEEFloat, y::vIEEEFloat, ::False)
@ieee begin
ry = one(y) / y
r = x * ry
hx, lx = splitprec(r)
hy, ly = splitprec(y)
Double(r, (((-hx * hy + r * y) - lx * hy - hx * ly) - lx * ly) * ry)
end
end
@inline ddiv(x, y) = ddiv(x, y, fma_fast())
# 1/x
@inline function drec(x::vIEEEFloat, ::True)
zhi = inv(x)
Double(zhi, (vfnmadd(zhi, x, one(eltype(x))) * zhi))
end
@inline function drec(x::vIEEEFloat, ::False)
@ieee begin
c = one(x) / x
u = dmul(c, x, False())
Double(c, (one(eltype(u.hi)) - u.hi - u.lo) * c)
end
end
@inline function drec(x::Double{<:vIEEEFloat}, ::True)
zhi = inv(x.hi)
Double(zhi, ((vfnmadd(zhi, x.hi, one(eltype(x))) - (zhi * x.lo)) * zhi))
end
@inline function drec(x::Double{<:vIEEEFloat}, ::False)
@ieee begin
c = inv(x.hi)
u = dmul(c, x.hi, False())
Double(c, (one(eltype(u.hi)) - u.hi - u.lo - c * x.lo) * c)
end
end
@inline drec(x) = drec(x, fma_fast())
# `_vscalef` for architectures without `vscalef`.
# Magic rounding constant: 1.5*2^52. Adding it to, then subtracting it from, a float rounds the value to an integer.
MAGIC_ROUND_CONST(::Type{Float64}) = 6.755399441055744e15
MAGIC_ROUND_CONST(::Type{Float32}) = 1.2582912f7
@inline function vscalef(
x::Union{T,AbstractSIMD{<:Any,T}},
y::Union{T,AbstractSIMD{<:Any,T}},
::False
) where {T<:Union{Float32,Float64}}
_vscalef(x, floor(y))
end
@inline signif_bits(::Type{Float32}) = 0x00000017 # 23
@inline signif_bits(::Type{Float64}) = 0x0000000000000034 # 52
@inline function _vscalef(
x::Union{T,AbstractSIMD{<:Any,T}},
y::Union{T,AbstractSIMD{<:Any,T}}
) where {T<:Union{Float32,Float64}}
N = reinterpret(Base.uinttype(T), y + MAGIC_ROUND_CONST(T))
k = N# >>> 0x00000008
small_part = reinterpret(Base.uinttype(T), x)
twopk = (k % Base.uinttype(T)) << signif_bits(T)
reinterpret(T, twopk + small_part)
end
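# How the magic constant is used here (illustrative): for |y| < 2^51, the
# significand of y + 1.5*2^52 holds round(y) in its low bits, e.g.
#   reinterpret(UInt64, 3.0 + MAGIC_ROUND_CONST(Float64)) == 0x4338000000000003
# `_vscalef` shifts that integer into the exponent field and adds it to the bits of
# x, so `_vscalef(x, y) ≈ x * 2^y` for integral y, absent overflow or underflow.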
@inline vscalef(
m::AbstractMask,
v1::AbstractSIMD,
v2::AbstractSIMD,
v3::AbstractSIMD,
::False
) = vifelse(m, vscalef(v1, v2, False()), v3)
@inline vscalef(v1::T, v2::T) where {T<:AbstractSIMD} =
vscalef(v1, v2, has_feature(Val(:x86_64_avx512f)))
@inline vscalef(m::AbstractMask, v1::T, v2::T, v3::T) where {T<:AbstractSIMD} =
vscalef(m, v1, v2, v3, has_feature(Val(:x86_64_avx512f)))
@inline vscalef(v1::T, v2::T) where {T<:Union{Float32,Float64}} =
vscalef(v1, v2, False())
@inline vscalef(
b::Bool,
v1::T,
v2::T,
v3::T
) where {T<:Union{Float32,Float64}} = b ? vscalef(v1, v2, False()) : v3
@inline vscalef(v1, v2) = ((v3, v4) = promote(v1, v2); vscalef(v3, v4))
@generated function vscalef(
v1::Vec{W,T},
v2::Vec{W,T},
::True
) where {W,T<:Union{Float32,Float64}}
bits = (8W * sizeof(T))::Int
any(==(bits), (128, 256, 512)) || throw(
ArgumentError(
"Vectors are $bits bits, but only 128, 256, and 512 bits are supported."
)
)
ltyp = LLVM_TYPES[T]
vtyp = "<$W x $ltyp>"
dors = T === Float64 ? 'd' : 's'
instr = "$vtyp @llvm.x86.avx512.mask.scalef.p$(dors).$bits"
mtyp = W == 16 ? "i16" : "i8"
if bits == 512
decl = "declare $instr($vtyp, $vtyp, $vtyp, $mtyp, i32)"
# instrs = "%res = call $instr($vtyp %0, $vtyp %1, $vtyp undef, $mtyp -1, i32 11)\nret $vtyp %res"
instrs = "%res = call $instr($vtyp %0, $vtyp %1, $vtyp undef, $mtyp -1, i32 8)\nret $vtyp %res"
else
decl = "declare $instr($vtyp, $vtyp, $vtyp, $mtyp)"
instrs = "%res = call $instr($vtyp %0, $vtyp %1, $vtyp undef, $mtyp -1)\nret $vtyp %res"
end
arg_syms = [:(data(v1)), :(data(v2))]
llvmcall_expr(
decl,
instrs,
:(_Vec{$W,$T}),
:(Tuple{_Vec{$W,$T},_Vec{$W,$T}}),
vtyp,
fill(vtyp, 2),
arg_syms
)
end
@generated function vscalef(
m::AbstractMask{W},
v1::Vec{W,T},
v2::Vec{W,T},
v3::Vec{W,T},
::True
) where {W,T<:Union{Float32,Float64}}
bits = (8W * sizeof(T))::Int
any(==(bits), (128, 256, 512)) || throw(
ArgumentError(
"Vectors are $bits bits, but only 128, 256, and 512 bits are supported."
)
)
ltyp = LLVM_TYPES[T]
vtyp = "<$W x $ltyp>"
dors = T === Float64 ? 'd' : 's'
mtyp = W == 16 ? "i16" : "i8"
mtypj = W == 16 ? :UInt16 : :UInt8
instr = "$vtyp @llvm.x86.avx512.mask.scalef.p$(dors).$bits"
if bits == 512
decl = "declare $instr($vtyp, $vtyp, $vtyp, $mtyp, i32)"
instrs = "%res = call $instr($vtyp %0, $vtyp %1, $vtyp %2, $mtyp %3, i32 11)\nret $vtyp %res"
instrs = "%res = call $instr($vtyp %0, $vtyp %1, $vtyp %2, $mtyp %3, i32 8)\nret $vtyp %res"
else
decl = "declare $instr($vtyp, $vtyp, $vtyp, $mtyp)"
instrs = "%res = call $instr($vtyp %0, $vtyp %1, $vtyp %2, $mtyp %3)\nret $vtyp %res"
end
arg_syms = [:(data(v1)), :(data(v2)), :(data(v3)), :(data(m))]
llvmcall_expr(
decl,
instrs,
:(_Vec{$W,$T}),
:(Tuple{_Vec{$W,$T},_Vec{$W,$T},_Vec{$W,$T},$mtypj}),
vtyp,
[vtyp, vtyp, vtyp, mtyp],
arg_syms
)
end
@generated function vsreduce(
v::Vec{W,T},
::Val{M}
) where {W,T<:Union{Float32,Float64},M}
bits = (8W * sizeof(T))::Int
any(==(bits), (128, 256, 512)) || throw(
ArgumentError(
"Vectors are $bits bits, but only 128, 256, and 512 bits are supported."
)
)
M isa Integer || throw(
ArgumentError(
"M must be an integer, but received $M of type $(typeof(M))."
)
)
ltyp = LLVM_TYPES[T]
vtyp = "<$W x $ltyp>"
dors = T === Float64 ? 'd' : 's'
instr = "$vtyp @llvm.x86.avx512.mask.reduce.p$(dors).$bits"
mtyp = W == 16 ? "i16" : "i8"
if bits == 512
decl = "declare $instr($vtyp, i32, $vtyp, $mtyp, i32)"
instrs = "%res = call $instr($vtyp %0, i32 $M, $vtyp undef, $mtyp -1, i32 8)\nret $vtyp %res"
else
decl = "declare $instr($vtyp, i32, $vtyp, $mtyp)"
instrs = "%res = call $instr($vtyp %0, i32 $M, $vtyp undef, $mtyp -1)\nret $vtyp %res"
end
arg_syms = [:(data(v))]
llvmcall_expr(
decl,
instrs,
:(_Vec{$W,$T}),
:(Tuple{_Vec{$W,$T}}),
vtyp,
[vtyp],
arg_syms
)
end
@generated function vpermi2pd(
c::Vec{8,UInt64},
v1::Vec{8,Float64},
v2::Vec{8,Float64}
) #where {W,T<:Union{Float32,Float64}, M}
W = 8
T = Float64
bits = (8W * sizeof(T))::Int
# bits ∈ (128,256,512) || throw(ArgumentError("Vectors are $bits bits, but only 128, 256, and 512 bits are supported."))
ityp = "i$(8sizeof(T))"
vityp = "<$W x $ityp>"
ltyp = LLVM_TYPES[T]
vtyp = "<$W x $ltyp>"
dors = T === Float64 ? 'd' : 's'
instr = "$vtyp @llvm.x86.avx512.vpermi2var.p$(dors).$bits"
decl = "declare $instr($vtyp, $vityp, $vtyp)"
instrs = "%res = call $instr($vtyp %0, $vityp %1, $vtyp %2)\nret $vtyp %res"
arg_syms = [:(data(v1)), :(data(c)), :(data(v2))]
jityp = T === Float64 ? :UInt64 : :UInt32
llvmcall_expr(
decl,
instrs,
:(_Vec{$W,$T}),
:(Tuple{_Vec{$W,$T},_Vec{$W,$jityp},_Vec{$W,$T}}),
vtyp,
[vtyp, vityp, vtyp],
arg_syms
)
end
@inline vscalef(v1::VecUnroll, v2::VecUnroll) =
VecUnroll(fmap(vscalef, getfield(v1, :data), getfield(v2, :data)))
@inline vscalef(m::VecUnroll, v1::VecUnroll, v2::VecUnroll, v3::VecUnroll) =
VecUnroll(
fmap(
vscalef,
getfield(m, :data),
getfield(v1, :data),
getfield(v2, :data),
getfield(v3, :data)
)
)
@inline vsreduce(v::VecUnroll, ::Val{M}) where {M} =
VecUnroll(fmap(vsreduce, getfield(v, :data), Val{M}()))
@inline vpermi2pd(v1::VecUnroll, v2::VecUnroll, v3::VecUnroll) = VecUnroll(
fmap(
vpermi2pd,
getfield(v1, :data),
getfield(v2, :data),
getfield(v3, :data)
)
)
@inline vpermi2pd(v1::VecUnroll, v2::Vec, v3::Vec) =
VecUnroll(fmap(vpermi2pd, getfield(v1, :data), v2, v3))
# Magic rounding constant: 1.5*2^52. Adding it to, then subtracting it from, a float rounds the value to an integer.
# min and max arguments by base and type
MAX_EXP(::Val{2}, ::Type{Float64}) = 1024.0 # log2 2^1023*(2-2^-52)
MIN_EXP(::Val{2}, ::Type{Float64}) = -1022.0 # log2(big(2)^-1023*(2-2^-52))
MAX_EXP(::Val{2}, ::Type{Float32}) = 128.0f0 # log2 2^127*(2-2^-23)
MIN_EXP(::Val{2}, ::Type{Float32}) = -126.0f0 # log2 2^-126
MAX_EXP(::Val{ℯ}, ::Type{Float64}) = 709.782712893383996732 # log 2^1023*(2-2^-52)
MIN_EXP(::Val{ℯ}, ::Type{Float64}) = -708.396418532264106335 # log 2^-1022
MAX_EXP(::Val{ℯ}, ::Type{Float32}) = 88.72283905206835f0 # log 2^127 *(2-2^-23)
MIN_EXP(::Val{ℯ}, ::Type{Float32}) = -87.3365448101577555f0 # log 2^-127*(2-2^-23)
MAX_EXP(::Val{10}, ::Type{Float64}) = 308.25471555991675 # log10 2^1023*(2-2^-52)
MIN_EXP(::Val{10}, ::Type{Float64}) = -307.65260000 # log10 2^-1022
MAX_EXP(::Val{10}, ::Type{Float32}) = 38.531839419103626f0 # log10 2^127 *(2-2^-23)
MIN_EXP(::Val{10}, ::Type{Float32}) = -37.9297794795476f0 # log10 2^-127 *(2-2^-23)
# 256/log(base, 2) (For Float64 reductions)
LogBo256INV(::Val{2}, ::Type{Float64}) = 256.0
LogBo256INV(::Val{ℯ}, ::Type{Float64}) = 369.3299304675746
LogBo256INV(::Val{10}, ::Type{Float64}) = 850.4135922911647
LogBo16INV(::Val{2}, ::Type{Float64}) = 16.0
LogBo16INV(::Val{ℯ}, ::Type{Float64}) = 23.083120654223414
LogBo16INV(::Val{10}, ::Type{Float64}) = 53.150849518197795
# -log(base, 2)/256 in upper and lower bits
LogBo256U(::Val{2}, ::Type{Float64}) = -0.00390625
LogBo256U(::Val{ℯ}, ::Type{Float64}) = -0.0027076061740622863
LogBo256U(::Val{10}, ::Type{Float64}) = -0.0011758984205624266
LogBo256L(base::Val{2}, ::Type{Float64}) = 0.0
LogBo256L(base::Val{ℯ}, ::Type{Float64}) = -9.058776616587108e-20
LogBo256L(base::Val{10}, ::Type{Float64}) = 1.0952062999160822e-20
LogBo16U(::Val{2}, ::Type{Float64}) = -0.0625
LogBo16U(::Val{ℯ}, ::Type{Float64}) =
-0.04332169878499658183857700759113603550471875839751595338254250059333710137310597
LogBo16U(::Val{10}, ::Type{Float64}) =
-0.01881437472899882470085868092028081417301186759138178383190171632044426182965149
LogBo16L(base::Val{2}, ::Type{Float64}) = Zero()
LogBo16L(base::Val{ℯ}, ::Type{Float64}) = -1.4494042586539372e-18
LogBo16L(base::Val{10}, ::Type{Float64}) = 1.7523300798657315e-19
# 1/log(base, 2) (For Float32 reductions)
LogBINV(::Val{2}, ::Type{Float32}) = 1.0f0
LogBINV(::Val{ℯ}, ::Type{Float32}) = 1.442695f0
LogBINV(::Val{10}, ::Type{Float32}) = 3.321928f0
# -log(base, 2) in upper and lower bits
LogBU(::Val{2}, ::Type{Float32}) = -1.0f0
LogBU(::Val{ℯ}, ::Type{Float32}) = -0.6931472f0
LogBU(::Val{10}, ::Type{Float32}) = -0.30103f0
LogBL(::Val{2}, ::Type{Float32}) = 0.0f0
LogBL(::Val{ℯ}, ::Type{Float32}) = 1.9046542f-9
LogBL(::Val{10}, ::Type{Float32}) = 1.4320989f-8
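# These constants implement Cody–Waite style range reduction: with k = 256, 16, or
# 1 as above, N = round(x * k / log(B, 2)) and
#   r = x - N * log(B, 2) / k
# is computed with two fmas from the hi (LogB*U) and lo (LogB*L) pieces of
# -log(B, 2)/k, so that B^x = 2^(N/k) * B^r with |r| small enough for a short
# polynomial kernel.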
const FloatType64 = Union{Float64,AbstractSIMD{<:Any,Float64}}
const FloatType32 = Union{Float32,AbstractSIMD{<:Any,Float32}}
# Range reduced kernels
@inline function expm1b_kernel(::Val{2}, x::FloatType64)
# c6 = 0.6931471807284470571335252997834339128744539291358546258980326560263434831636494
# c5 = 0.2402265119815758621794630410361025063296309075509484445159065872903725193960909
# c4 = 0.05550410353447979823044149277158612685395896745775325243210607075766620053156177
# c3 = 0.009618027253668450057706478612143223628979891379942570690446533010539871321541621
# c2 = 0.001333392256353875413926876917141786686018234585146983223440727245459444740967253
# c1 = 0.0001546929114168849728971327603158937595919966441732209337930866845915899223829891
# c0 = 1.520192159457321441849564286267892034534060236471603225598783028117591315796835e-05
# c5 = 0.6931472067096466099497350107329038640311532915014328403152023514862215769240471
# c4 = 0.2402265150505520926831534797602254284855354178135282005410994764797061783074115
# c3 = 0.05550327215766594452554739168596479012109775178907146059734348050730310383358696
# c2 = 0.00961799451416147891836707565892019722415069604010430702590438969636015825337285
# c1 = 0.001340043166700788064581996335332499076913713747844545061461125917537819216640726
# c0 = 0.0001547802227945780278842074081393459334029013917349238368250485753166922523500416
# x * muladd(muladd(muladd(muladd(muladd(muladd(muladd(muladd(c0,x,c1),x,c2),x,c3),x,c4),x,c5),x,c6),x,c7),x,c8)
# x * muladd(muladd(muladd(muladd(muladd(muladd(c0,x,c1),x,c2),x,c3),x,c4),x,c5),x,c6)
# x * muladd(muladd(muladd(muladd(muladd(c0,x,c1),x,c2),x,c3),x,c4),x,c5)
x * muladd(
muladd(
muladd(0.009618130135925114, x, 0.055504115022757844),
x,
0.2402265069590989
),
x,
0.6931471805599393
)
end
@inline function expm1b_kernel(::Val{ℯ}, x::FloatType64)
x * muladd(
muladd(
muladd(0.04166666762124105, x, 0.1666666704849642),
x,
0.49999999999999983
),
x,
0.9999999999999998
)
end
@inline function expm1b_kernel(::Val{10}, x::FloatType64)
x * muladd(
muladd(
muladd(
muladd(0.5393833837413015, x, 1.1712561359457612),
x,
2.0346785922926713
),
x,
2.6509490552382577
),
x,
2.302585092994046
)
end
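# The expm1b kernels above are polynomial fits to B^r - 1 on the reduced interval,
# and the expb kernels below fit B^r itself (hence their trailing 1.0f0 term); all
# are evaluated by Horner's rule via muladd. For B = ℯ the coefficients are the
# Taylor coefficients 1, 1/2, 1/6, 1/24, ... perturbed slightly by the minimax fit.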
@inline function expb_kernel(::Val{2}, x::FloatType32)
muladd(
muladd(
muladd(
muladd(
muladd(
muladd(muladd(1.5316464f-5, x, 0.00015478022f0), x, 0.0013400431f0),
x,
0.009617995f0
),
x,
0.05550327f0
),
x,
0.24022652f0
),
x,
0.6931472f0
),
x,
1.0f0
)
end
@inline function expb_kernel(::Val{ℯ}, x::FloatType32)
muladd(
muladd(
muladd(
muladd(
muladd(
muladd(
muladd(0.00019924171f0, x, 0.0013956056f0),
x,
0.008375129f0
),
x,
0.041666083f0
),
x,
0.16666415f0
),
x,
0.5f0
),
x,
1.0f0
),
x,
1.0f0
)
end
@inline function expb_kernel(::Val{10}, x::FloatType32)
muladd(
muladd(
muladd(
muladd(
muladd(
muladd(muladd(0.06837386f0, x, 0.20799689f0), x, 0.54208815f0),
x,
1.1712388f0
),
x,
2.034648f0
),
x,
2.6509492f0
),
x,
2.3025851f0
),
x,
1.0f0
)
end
const J_TABLE = Float64[2.0^(big(j - 1) / 256) for j = 1:256];
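# J_TABLE[j] == 2^((j - 1)/256), so after reduction the Float64 result is assembled
# (illustratively, in one-based indexing) as
#   B^x == 2^(N >>> 8) * J_TABLE[(N & 0xff) + 1] * B^r
# where the last factor is computed below as js + js * expm1b_kernel(B, r).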
@inline fast_fma(a, b, c, ::True) = fma(a, b, c)
@inline function fast_fma(a, b, c, ::False)
d = dadd(dmul(Double(a), Double(b), False()), Double(c))
add_ieee(d.hi, d.lo)
end
@static if (Sys.ARCH === :x86_64) | (Sys.ARCH === :i686)
const TABLE_EXP_64_0 =
Vec(ntuple(j -> Core.VecElement(Float64(2.0^(big(j - 1) / 16))), Val(8)))
const TABLE_EXP_64_1 =
Vec(ntuple(j -> Core.VecElement(Float64(2.0^(big(j + 7) / 16))), Val(8)))
@inline target_trunc(v, ::VectorizationBase.True) = v
@inline target_trunc(v, ::VectorizationBase.False) = v % UInt32
@inline target_trunc(v) =
target_trunc(v, VectorizationBase.has_feature(Val(:x86_64_avx512dq)))
# @inline function vexp2_v1(x::AbstractSIMD{8,Float64})
# x16 = x
# # x16 = 16x
# r = vsreduce(x16, Val(4))
# m = x16 - r
# mfrac = m
# inds = (reinterpret(UInt64, mfrac) >> 0x000000000000002d) & 0x000000000000000f
# # @show r m mfrac reinterpret(UInt64, m) reinterpret(UInt64, mfrac)
# # js = vpermi2pd(inds, TABLE_EXP_64_0, TABLE_EXP_64_1)
# # @show m mfrac r
# small_part = expm1b_kernel(Val(2), r) + 1.0
# # js = 1.0
# # small_part = vfmadd(js, expm1b_kernel(Val(2), r), js)
# vscalef(small_part, mfrac)
# end
@inline function expm1b_kernel_16(::Val{2}, x)
c5 =
0.6931471805599457533351827593325319924753473772859614915719486459595837313933663
c4 =
0.2402265069591009431089489060897837825648676480621950809556237945205562267112511
c3 =
0.05550410865663929372911461843767669974316894963735870580154522796380984673567634
c2 =
0.00961812910613182376367867689426991348318504321185094738294343470767871628697879
c1 =
0.001333378157683735211078403326604752238340853209789619792858391909299167771871147
c0 =
0.0001540378851029625623114826398060979330719673345637296642237670082377277446583639
x * vmuladd_fast(
vmuladd_fast(
vmuladd_fast(vmuladd_fast(vmuladd_fast(c0, x, c1), x, c2), x, c3),
x,
c4
),
x,
c5
)
end
@inline function expm1b_kernel_16(::Val{ℯ}, x)
c5 =
1.000000000000000640438225946852982701258391604480638275588427888399057119915227
c4 =
0.5000000000000004803287542332217715766116071510522583538834274474935075866898906
c3 =
0.1666666666420970554527207281834431226735741940161719645985080160507073865025253
c2 =
0.04166666666018301921665935823120024420659933010915524936059730214858712998209511
c1 =
0.008333472974984405879148046293292753978131598285368755170712233472964279745777974
c0 =
0.001388912162496018623851591184688043066476532389093553363939521815388727152058955
x * vmuladd_fast(
vmuladd_fast(
vmuladd_fast(vmuladd_fast(vmuladd_fast(c0, x, c1), x, c2), x, c3),
x,
c4
),
x,
c5
)
end
@inline function expm1b_kernel_16(::Val{10}, x)
c5 =
2.302585092994047158681503503460480873860999793973827515869365761962430703319599
c4 =
2.650949055239201551934947671858339219424413336755573194223955910456821842421188
c3 =
2.034678591993528625083054018110717593492391962926107057414972651492780932119609
c2 =
1.171255148730010832148986815739840479496404930758611797062000621828706728750671
c1 =
0.5393919676343165962473794696403862709895176169091768718271240564783020989456929
c0 =
0.2069993173257113377172910397724414085027323868592924170210484263853229417141011
x * vmuladd_fast(
vmuladd_fast(
vmuladd_fast(vmuladd_fast(vmuladd_fast(c0, x, c1), x, c2), x, c3),
x,
c4
),
x,
c5
)
end
# @inline function expm1b_kernel_6(::Val{2}, x)
# c6 = 0.6931471805599453094157857128856867906777808839530204388709017060018142940776171
# c5 = 0.2402265069591008469523412357777710806331204676211882505524452303635329805655358
# c4 = 0.05550410866482161698117481145997657675888637052903928922860089114193615876343007
# c3 = 0.009618129106525681209446910869436511674612664593938506025921861852732097374219765
# c2 = 0.001333355814485111028814491629997422287837080138262113640339762815224264878390602
# c1 = 0.0001540375624549508734984308915404920724081959405205139643458055193131059150331185
# c0 = 1.525295744513115862409484203574886089068776710280414076942740530839031792795809e-05
# x * vmuladd_fast(vmuladd_fast(vmuladd_fast(vmuladd_fast(vmuladd_fast(vmuladd_fast(c0,x,c1),x,c2),x,c3),x,c4),x,c5),x,c6)
# end
# # using Remez
# # N,D,E,X = ratfn_minimax(x -> (exp2(x) - big"1")/x, [big(nextfloat(-0.03125)),big(0.03125)], 4, 0); @show(E); N
# @inline function expm1b_kernel_4(::Val{2}, x)
# c4 = 0.6931471805599461972549081995383434692316977327912755704234013405443109498729026
# c3 = 0.2402265069131940842333497738928958607054740795078596615709864445611497846077303
# c2 = 0.05550410865300270379171299778517151376504051870524903806295523325435530249981495
# c1 = 0.009618317140648284298097106744730186251913149278152053357630395863210686828434175
# c0 = 0.001333381881551676348461495248002642715422207072457864472267417920610122672570108
# x * vmuladd_fast(vmuladd_fast(vmuladd_fast(vmuladd_fast(c0,x,c1),x,c2),x,c3),x,c4)
# end
# @inline function vexp2_v4(x::AbstractSIMD{W,Float64}) where {W}
# # r - VectorizationBase.vroundscale(r, Val(16*(4)))
# r = VectorizationBase.vsreduce(x,Val(0))
# rscale = VectorizationBase.vroundscale(r, Val(64))
# rs = r - rscale
# inds = convert(UInt, vsreduce(rscale, Val(1))*16.0)
# expr = expm1b_kernel_5(Val(2), rs)
# N_float = x - rs
# # @show inds rs N_float
# js = vpermi2pd(inds, TABLE_EXP_64_0, TABLE_EXP_64_1)
# small_part = vfmadd(js, expr, js)
# res = vscalef(small_part, N_float)
# return res
# end
####################################################################################################
#################################### AVX512 implementation ########################################
####################################################################################################
# With AVX512, we use a tiny look-up table of just 16 numbers for `Float64`,
# because we can perform the lookup using `vpermi2pd`, which is much faster than gather.
# To compensate, we need a larger polynomial.
# Because of the larger polynomial, this implementation works better on systems with 2 FMA units.
@inline function vexp2(x::AbstractSIMD{8,Float64}, ::True)
# M = 64 >> 4 = 4
# r = x - round(2^M * x)*2^-M
r = vsreduce(x, Val(64))
N_float = x - r
expr = expm1b_kernel_16(Val(2), r)
inds = convert(UInt64, vsreduce(N_float, Val(1)) * 16.0)
# inds = ((trunc(Int64, 16.0*N_float)%UInt64)) & 0x000000000000000f
js = vpermi2pd(inds, TABLE_EXP_64_0, TABLE_EXP_64_1)
small_part = vfmadd(js, expr, js)
res = vscalef(small_part, N_float)
return res
end
# @inline function vexp2_v2(x::AbstractSIMD{8,Float64}, ::True)#, ::Val{N}) where {N}
# r1 = vsreduce(x, Val(0))
# m = x - r1
# r = vfmsub(vsreduce(r1 * 16.0, Val(1)), 0.0625, 0.5)
# j = r1 - r
# js = vpermi2pd(convert(UInt, j), TABLE_EXP_64_0, TABLE_EXP_64_1)
# expr = expm1b_kernel_5(Val(2), r) # 2^r - 1
# small_part = vfmadd(js, expr, js)
# # @show r m j
# vscalef(small_part, m)
# end
# @inline function vexp_v3(x::AbstractSIMD{8,Float64}, ::True)#, ::Val{N}) where {N}
# xl2e = mul_ieee(1.4426950408889634, x)
# r1 = vsreduce(xl2e, Val(0))
# m = xl2e - r1
# r = vfmsub(vsreduce(r1 * 16.0, Val(1)), 0.0625, 0.5)
# j = r1 - r
# js = vpermi2pd(convert(UInt, j), TABLE_EXP_64_0, TABLE_EXP_64_1)
# rs = vfnmadd(0.6931471805599453094172321214581765680755001343602552541206800094933936219696955, m+j, x)
# expr = expm1b_kernel_5(Val(ℯ), r) # 2^r - 1
# small_part = vfmadd(js, expr, js)
# @show r m j
# vscalef(small_part, m)
# end
# _log2(::Val{ℯ}) = 1.4426950408889634
# invlog2hi(::Val{ℯ}) = 0.6931471805599453094172321214581765680755001343602552541206800094933936219696955
# invlog2lo(::Val{ℯ}) = -2.319046813846299615494855463875478650412068000949339362196969553467383712860567e-17
# _log2(::Val{10}) = 3.321928094887362347870319429489390175864831393024580612054756395815934776608624
# invlog2hi(::Val{10}) = 0.3010299956639811952137388947244930267681898814621085413104274611271081892744238
# invlog2lo(::Val{10}) = 2.803728127785170339013117338996875833689572538872891810725576172209659522828247e-18
  # Requires two more floating-point μops, but eight fewer load μops than the default version.
  # This thus microbenchmarks a little worse, but the theory is that using less cache than the
  # 256 Float64 * 8 bytes/Float64 = 2 KiB table may improve real-world performance / reduce
  # latency from random table accesses.
@inline function vexp_avx512(x::AbstractSIMD{8,Float64}, ::Val{B}) where {B}
N_float = round(x * LogBo16INV(Val(B), Float64))
r = muladd(N_float, LogBo16U(Val(B), Float64), x)
r = muladd(N_float, LogBo16L(Val(B), Float64), r)
inds = ((trunc(Int64, N_float) % UInt64)) & 0x000000000000000f
expr = expm1b_kernel_16(Val(B), r)
js = vpermi2pd(inds, TABLE_EXP_64_0, TABLE_EXP_64_1)
small_part = vfmadd(js, expr, js)
res = vscalef(small_part, 0.0625 * N_float)
return res
end
@inline function vexp_avx512(x::AbstractSIMD{W,Float64}, ::Val{B}) where {W,B}
N_float =
muladd(x, LogBo256INV(Val{B}(), Float64), MAGIC_ROUND_CONST(Float64))
N = target_trunc(reinterpret(UInt64, N_float))
N_float = N_float - MAGIC_ROUND_CONST(Float64)
r = fma(N_float, LogBo256U(Val{B}(), Float64), x)
r = fma(N_float, LogBo256L(Val{B}(), Float64), r)
# @show (N & 0x000000ff) % Int
# @show N N & 0x000000ff
js = vload(
VectorizationBase.zero_offsets(stridedpointer(J_TABLE)),
(N & 0x000000ff,)
)
# k = N >>> 0x00000008
# small_part = reinterpret(UInt64, vfmadd(js, expm1b_kernel(Val{B}(), r), js))
small_part = vfmadd(js, expm1b_kernel(Val{B}(), r), js)
# return reinterpret(Float64, small_part), r, k, N_float, js
res = vscalef(small_part, 0.00390625 * N_float)
# twopk = (k % UInt64) << 0x0000000000000034
# res = reinterpret(Float64, twopk + small_part)
return res
end
@inline function vexp_avx512(
x::Union{Float32,AbstractSIMD{<:Any,Float32}},
::Val{B}
) where {B}
N_float = vfmadd(x, LogBINV(Val{B}(), Float32), MAGIC_ROUND_CONST(Float32))
N_float = (N_float - MAGIC_ROUND_CONST(Float32))
r = fma(N_float, LogBU(Val{B}(), Float32), x)
r = fma(N_float, LogBL(Val{B}(), Float32), r)
small_part = expb_kernel(Val{B}(), r)
return vscalef(small_part, N_float)
end
@inline function vexp2(x::AbstractSIMD{<:Any,Float32}, ::True)
r = vsreduce(x, Val(0))
N_float = x - r
small_part = expb_kernel(Val{2}(), r)
return vscalef(small_part, N_float)
end
# @inline function vexp_test(x::AbstractSIMD{16,Float32})#, ::True)
# xb = x * LogBINV(Val{ℯ}(), Float32)
# # rs = xb - round(xb)
# rs = vsreduce(xb, Val(0))
# N_float = xb - rs
# # rs = x*log2(ℯ) - N_float
# # r = fma(x, Float32(log2(ℯ)), - N_float)
# # rs = x*(l2_hi + l2_lo) - N_float
# # rs = x*l2_hi - N_float + x*l2_lo
# # r = fma(x, 1.925963f-8, rs)
# # small_part = expb_kernel(Val{2}(), r)
# # B = ℯ
# # r = fma(N_float, LogBU(Val{B}(), Float32), x)
# # r = fma(N_float, LogBL(Val{B}(), Float32), r)
# # small_part = expb_kernel(Val{B}(), r)
# rv2 = fma(1.442695f0, x, -N_float)
# rv2 = fma(1.925963f-8, x, rv2)
# small_part = expb_kernel(Val{2}(), rv2)
# # @show rs r rs / r rv2
# # xb = x * log2(ℯ )
# # rs = xb - N_float
# # rs = x * log2(ℯ) - N_float
# # vs, desierd:
# # r = x - N_float * log(2)
# # r = x - N_float / log2(ℯ)
# # r = rs / log2(ℯ)
# # r = 0.6931471805599453f0 * rs
# # small_part = expb_kernel(Val{2}(), r)
# return vscalef(small_part, N_float)
# end
# @inline vexp_test(x::AbstractSIMD{16}) = vexp_test(Float32(x))
# @inline vexp_test(x::Vec{8}) = shufflevector(
# vexp_test(
# shufflevector(
# x,
# x,
# Val((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))
# )
# ),
# Val((0, 1, 2, 3, 4, 5, 6, 7))
# )
# @inline vexp_test(x::Vec{4}) = shufflevector(
# vexp_test(shufflevector(x, x, Val((0, 1, 2, 3, 4, 5, 6, 7)))),
# Val((0, 1, 2, 3))
# )
# @inline vexp_test(x::Vec{2}) = shufflevector(
# vexp_test(shufflevector(x, x, Val((0, 1, 2, 3)))),
# Val((0, 1))
# )
# @inline vexp_test(x::VecUnroll) = VecUnroll(fmap(vexp_test, data(x)))
# @inline vexp_test(x::Float32) = vexp_test(Vec(x))(1)
else# if !((Sys.ARCH === :x86_64) | (Sys.ARCH === :i686))
const target_trunc = identity
end
# @inline function vexp_avx512(vu::VecUnroll{1,8,Float64,Vec{8,Float64}}, ::Val{B}) where {B}
# x, y = data(vu)
# N_float₁ = round(x*LogBo16INV(Val(B), Float64))
# N_float₂ = muladd(y, LogBo256INV(Val{B}(), Float64), MAGIC_ROUND_CONST(Float64))
# r₁ = muladd(N_float₁, LogBo16U(Val(B), Float64), x)
# N₂ = target_trunc(reinterpret(UInt64, N_float₂))
# r₁ = muladd(N_float₁, LogBo16L(Val(B), Float64), r₁)
# js₂ = vload(VectorizationBase.zero_offsets(stridedpointer(J_TABLE)), (N₂ & 0x000000ff,))
# N_float₂ = N_float₂ - MAGIC_ROUND_CONST(Float64)
# inds₁ = ((trunc(Int64, N_float₁)%UInt64)) & 0x000000000000000f
# r₂ = fma(N_float₂, LogBo256U(Val{B}(), Float64), y)
# expr₁ = expm1b_kernel_16(Val(B), r₁)
# r₂ = fma(N_float₂, LogBo256L(Val{B}(), Float64), r₂)
# js₁ = vpermi2pd(inds₁, TABLE_EXP_64_0, TABLE_EXP_64_1)
# small_part₁ = vfmadd(js₁, expr₁, js₁)
# small_part₂ = vfmadd(js₂, expm1b_kernel(Val{B}(), r₂), js₂)
# res₁ = vscalef(small_part₁, 0.0625*N_float₁)
# res₂ = vscalef(small_part₂, 0.00390625*N_float₂)
# return VecUnroll((res₁, res₂))
# end
# @inline _vexp(x, ::True) = vexp2( 1.4426950408889634 * x, True() )
# @inline _vexp(x, ::True) = vexp2( mul_ieee(1.4426950408889634, x), True() )
# @inline _vexp10(x, ::True) = vexp2( 3.321928094887362 * x, True() )
# @inline _vexp(x) = _vexp(x, has_feature(Val(:x86_64_avx512f)))
# @inline _vexp10(x) = _vexp10(x, has_feature(Val(:x86_64_avx512f)))
@inline vexp(x::AbstractSIMD, ::True) = vexp_avx512(x, Val(ℯ))
@inline vexp2(x::AbstractSIMD, ::True) = vexp_avx512(x, Val(2))
@inline vexp10(x::AbstractSIMD, ::True) = vexp_avx512(x, Val(10))
# @inline vexp(x::AbstractSIMD{W,Float32}, ::True) where {W} = vexp_generic(x, Val(ℯ))
# @inline vexp2(x::AbstractSIMD{W,Float32}, ::True) where {W} = vexp_generic(x, Val(2))
# @inline vexp10(x::AbstractSIMD{W,Float32}, ::True) where {W} = vexp_generic(x, Val(10))
@inline Base.exp(v::AbstractSIMD{W}) where {W} = vexp(float(v))
@inline Base.exp2(v::AbstractSIMD{W}) where {W} = vexp2(float(v))
@inline Base.exp10(v::AbstractSIMD{W}) where {W} = vexp10(float(v))
@static if (Base.libllvm_version ≥ v"11") &
((Sys.ARCH === :x86_64) | (Sys.ARCH === :i686))
@inline vexp(v::AbstractSIMD) =
vexp(float(v), has_feature(Val(:x86_64_avx512f)))
@inline vexp2(v::AbstractSIMD) =
vexp2(float(v), has_feature(Val(:x86_64_avx512f)))
@inline vexp10(v::AbstractSIMD) =
vexp10(float(v), has_feature(Val(:x86_64_avx512f)))
else
@inline vexp(v::AbstractSIMD) = vexp(float(v), False())
@inline vexp2(v::AbstractSIMD) = vexp2(float(v), False())
@inline vexp10(v::AbstractSIMD) = vexp10(float(v), False())
end
@inline vexp(v::Union{Float32,Float64}) = vexp(v, False())
@inline vexp2(v::Union{Float32,Float64}) = vexp2(v, False())
@inline vexp10(v::Union{Float32,Float64}) = vexp10(v, False())
@inline vexp(v::AbstractSIMD{2,Float32}) = vexp(v, False())
@inline vexp2(v::AbstractSIMD{2,Float32}) = vexp2(v, False())
@inline vexp10(v::AbstractSIMD{2,Float32}) = vexp10(v, False())
####################################################################################################
################################## Non-AVX512 implementation #######################################
####################################################################################################
@inline function vexp_generic_core(
x::Union{Float64,AbstractSIMD{<:Any,Float64}},
::Val{B}
) where {B}
N_float =
muladd(x, LogBo256INV(Val{B}(), Float64), MAGIC_ROUND_CONST(Float64))
N = target_trunc(reinterpret(UInt64, N_float))
N_float = N_float - MAGIC_ROUND_CONST(Float64)
r = fast_fma(N_float, LogBo256U(Val{B}(), Float64), x, fma_fast())
r = fast_fma(N_float, LogBo256L(Val{B}(), Float64), r, fma_fast())
# @show (N & 0x000000ff) % Int
js = vload(
VectorizationBase.zero_offsets(stridedpointer(J_TABLE)),
(N & 0x000000ff,)
)
k = N >>> 0x00000008
small_part = reinterpret(UInt64, vfmadd(js, expm1b_kernel(Val{B}(), r), js))
# return reinterpret(Float64, small_part), r, k, N_float, js
twopk = (k % UInt64) << 0x0000000000000034
res = reinterpret(Float64, twopk + small_part)
return res
end
@inline function vexp_generic(
x::Union{Float64,AbstractSIMD{<:Any,Float64}},
::Val{B}
) where {B}
res = vexp_generic_core(x, Val{B}())
res = ifelse(x >= MAX_EXP(Val{B}(), Float64), Inf, res)
res = ifelse(x <= MIN_EXP(Val{B}(), Float64), 0.0, res)
res = ifelse(isnan(x), x, res)
return res
end
@inline function vexp_generic_core(
x::Union{Float32,AbstractSIMD{<:Any,Float32}},
::Val{B}
) where {B}
N_float = vfmadd(x, LogBINV(Val{B}(), Float32), MAGIC_ROUND_CONST(Float32))
N = reinterpret(UInt32, N_float)
N_float = (N_float - MAGIC_ROUND_CONST(Float32))
r = fast_fma(N_float, LogBU(Val{B}(), Float32), x, fma_fast())
r = fast_fma(N_float, LogBL(Val{B}(), Float32), r, fma_fast())
small_part = reinterpret(UInt32, expb_kernel(Val{B}(), r))
twopk = N << 0x00000017
res = reinterpret(Float32, twopk + small_part)
return res
end
@inline function vexp_generic(
x::Union{Float32,AbstractSIMD{<:Any,Float32}},
::Val{B}
) where {B}
res = vexp_generic_core(x, Val{B}())
res = ifelse(x >= MAX_EXP(Val{B}(), Float32), Inf32, res)
res = ifelse(x <= MIN_EXP(Val{B}(), Float32), 0.0f0, res)
res = ifelse(isnan(x), x, res)
return res
end
for (func, base) in (:vexp2 => Val(2), :vexp => Val(ℯ), :vexp10 => Val(10))
@eval @inline $func(x, ::False) = vexp_generic(x, $base)
end
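# Illustrative usage: without AVX512, `vexp(Vec(0.0, 1.0))` dispatches through
# `vexp(x, False())` to `vexp_generic(x, Val(ℯ))` and returns approximately
# `Vec(1.0, 2.718281828459045)`, with overflow to Inf, underflow to 0.0, and NaN
# propagation handled by the ifelse chain in `vexp_generic`.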
####################################################################################################
#################################### LOG HELPERS ###################################################
####################################################################################################
# TODO: move these back to log.jl when the log implementations there are good & well tested enough to use.
@generated function vgetexp(v::Vec{W,T}) where {W,T<:Union{Float32,Float64}}
bits = (8W * sizeof(T))::Int
any(==(bits), (128, 256, 512)) || throw(
ArgumentError(
"Vectors are $bits bits, but only 128, 256, and 512 bits are supported."
)
)
ltyp = LLVM_TYPES[T]
vtyp = "<$W x $ltyp>"
dors = T === Float64 ? 'd' : 's'
instr = "$vtyp @llvm.x86.avx512.mask.getexp.p$(dors).$bits"
mtyp = W == 16 ? "i16" : "i8"
if bits == 512
decl = "declare $instr($vtyp, $vtyp, $mtyp, i32)"
instrs = "%res = call $instr($vtyp %0, $vtyp undef, $mtyp -1, i32 12)\nret $vtyp %res"
else
decl = "declare $instr($vtyp, $vtyp, $mtyp)"
instrs = "%res = call $instr($vtyp %0, $vtyp undef, $mtyp -1)\nret $vtyp %res"
end
arg_syms = [:(data(v))]
llvmcall_expr(
decl,
instrs,
:(_Vec{$W,$T}),
:(Tuple{_Vec{$W,$T}}),
vtyp,
[vtyp],
arg_syms
)
end
@generated function vgetmant(v::Vec{W,T}) where {W,T<:Union{Float32,Float64}}
bits = (8W * sizeof(T))::Int
any(==(bits), (128, 256, 512)) || throw(
ArgumentError(
"Vectors are $bits bits, but only 128, 256, and 512 bits are supported."
)
)
ltyp = LLVM_TYPES[T]
vtyp = "<$W x $ltyp>"
dors = T === Float64 ? 'd' : 's'
instr = "$vtyp @llvm.x86.avx512.mask.getmant.p$(dors).$bits"
mtyp = W == 16 ? "i16" : "i8"
if bits == 512
decl = "declare $instr($vtyp, i32, $vtyp, $mtyp, i32)"
instrs = "%res = call $instr($vtyp %0, i32 11, $vtyp undef, $mtyp -1, i32 12)\nret $vtyp %res"
else
decl = "declare $instr($vtyp, i32, $vtyp, $mtyp)"
instrs = "%res = call $instr($vtyp %0, i32 11, $vtyp undef, $mtyp -1)\nret $vtyp %res"
end
arg_syms = [:(data(v))]
llvmcall_expr(
decl,
instrs,
:(_Vec{$W,$T}),
:(Tuple{_Vec{$W,$T}}),
vtyp,
[vtyp],
arg_syms
)
end
@generated function vgetmant(
v::Vec{W,T},
::Union{StaticInt{N},Val{N}}
) where {W,T<:Union{Float32,Float64},N}
bits = (8W * sizeof(T))::Int
any(==(bits), (128, 256, 512)) || throw(
ArgumentError(
"Vectors are $bits bits, but only 128, 256, and 512 bits are supported."
)
)
ltyp = LLVM_TYPES[T]
vtyp = "<$W x $ltyp>"
dors = T === Float64 ? 'd' : 's'
instr = "$vtyp @llvm.x86.avx512.mask.getmant.p$(dors).$bits"
mtyp = W == 16 ? "i16" : "i8"
if bits == 512
decl = "declare $instr($vtyp, i32, $vtyp, $mtyp, i32)"
instrs = "%res = call $instr($vtyp %0, i32 $N, $vtyp undef, $mtyp -1, i32 12)\nret $vtyp %res"
else
decl = "declare $instr($vtyp, i32, $vtyp, $mtyp)"
instrs = "%res = call $instr($vtyp %0, i32 $N, $vtyp undef, $mtyp -1)\nret $vtyp %res"
end
arg_syms = [:(data(v))]
llvmcall_expr(
decl,
instrs,
:(_Vec{$W,$T}),
:(Tuple{_Vec{$W,$T}}),
vtyp,
[vtyp],
arg_syms
)
end
@generated function vgetmant12(v::Vec{W,T}) where {W,T<:Union{Float32,Float64}}
bits = (8W * sizeof(T))::Int
any(==(bits), (128, 256, 512)) || throw(
ArgumentError(
"Vectors are $bits bits, but only 128, 256, and 512 bits are supported."
)
)
ltyp = LLVM_TYPES[T]
vtyp = "<$W x $ltyp>"
dors = T === Float64 ? 'd' : 's'
instr = "$vtyp @llvm.x86.avx512.mask.getmant.p$(dors).$bits"
mtyp = W == 16 ? "i16" : "i8"
if bits == 512
decl = "declare $instr($vtyp, i32, $vtyp, $mtyp, i32)"
instrs = "%res = call $instr($vtyp %0, i32 12, $vtyp undef, $mtyp -1, i32 8)\nret $vtyp %res"
else
decl = "declare $instr($vtyp, i32, $vtyp, $mtyp)"
instrs = "%res = call $instr($vtyp %0, i32 12, $vtyp undef, $mtyp -1)\nret $vtyp %res"
end
arg_syms = [:(data(v))]
llvmcall_expr(
decl,
instrs,
:(_Vec{$W,$T}),
:(Tuple{_Vec{$W,$T}}),
vtyp,
[vtyp],
arg_syms
)
end
@generated function vroundscale(
v::Vec{W,T},
::Union{Val{N},StaticInt{N}}
) where {W,T,N}
bits = (8W * sizeof(T))::Int
any(==(bits), (128, 256, 512)) || throw(
ArgumentError(
"Vectors are $bits bits, but only 128, 256, and 512 bits are supported."
)
)
ltyp = LLVM_TYPES[T]
vtyp = "<$W x $ltyp>"
dors = T === Float64 ? 'd' : 's'
instr = "$vtyp @llvm.x86.avx512.mask.rndscale.p$(dors).$bits"
mtyp = W == 16 ? "i16" : "i8"
if bits == 512
decl = "declare $instr($vtyp, i32, $vtyp, $mtyp, i32)"
instrs = "%res = call $instr($vtyp %0, i32 $N, $vtyp undef, $mtyp -1, i32 4)\nret $vtyp %res"
else
decl = "declare $instr($vtyp, i32, $vtyp, $mtyp)"
instrs = "%res = call $instr($vtyp %0, i32 $N, $vtyp undef, $mtyp -1)\nret $vtyp %res"
end
arg_syms = [:(data(v))]
llvmcall_expr(
decl,
instrs,
:(_Vec{$W,$T}),
:(Tuple{_Vec{$W,$T}}),
vtyp,
[vtyp],
arg_syms
)
end
@inline vgetexp(v::VecUnroll) = VecUnroll(fmap(vgetexp, getfield(v, :data)))
@inline vgetmant(v::VecUnroll) = VecUnroll(fmap(vgetmant, getfield(v, :data)))
@inline vgetmant(v::VecUnroll, ::Union{StaticInt{N},Val{N}}) where {N} =
VecUnroll(fmap(vgetmant, getfield(v, :data), StaticInt{N}()))
@inline Base.significand(v::VecUnroll, ::True) =
VecUnroll(fmap(vgetmant12, getfield(v, :data)))
@inline vroundscale(v::VecUnroll, ::Union{StaticInt{N},Val{N}}) where {N} =
VecUnroll(fmap(vroundscale, getfield(v, :data), StaticInt{N}()))
@inline Base.significand(v::Vec, ::True) = vgetmant12(v)
@inline Base.significand(v::AbstractSIMDVector, ::True) = vgetmant12(Vec(v))
mask_exponent(::Val{Float64}) = 0x000f_ffff_ffff_ffff
set_exponent(::Val{Float64}) = 0x3ff0_0000_0000_0000
mask_exponent(::Val{Float32}) = 0x007fffff
set_exponent(::Val{Float32}) = 0x3f800000
mask_exponent(::Val{Float16}) = 0x07ff
set_exponent(::Val{Float16}) = 0x3c00
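# Note (assumption about intended use): these masks also clear the sign bit, so the
# non-AVX512 `significand` below returns the magnitude of the significand for
# negative inputs, and the vgetmant12 path applies its own sign control. These
# overloads are primarily fed positive arguments by the log routines, where the
# distinction is immaterial; scalar `Base.significand` preserves the sign.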
@inline function Base.significand(v::AbstractSIMD{W,T}, ::False) where {W,T}
reinterpret(
T,
(reinterpret(Base.uinttype(T), v) & mask_exponent(Val(T))) |
set_exponent(Val(T))
)
end
@inline Base.exponent(v::Vec, ::True) = vgetexp(v)
@inline Base.exponent(v::AbstractSIMDVector, ::True) = vgetexp(Vec(v))
@inline Base.exponent(v::VecUnroll, ::True) =
VecUnroll(fmap(vgetexp, getfield(v, :data)))
@static if VERSION ≥ v"1.7.0-beta"
using Base: inttype
else
@inline inttype(::Type{T}) where {T} = signed(Base.uinttype(T))
end
@inline function Base.exponent(v::AbstractSIMD{W,T}, ::False) where {W,T}
I = inttype(T)
vshift = reinterpret(I, v) >> (Base.significand_bits(T) % I)
e = ((vshift % Int) & Base.exponent_raw_max(T)) - I(Base.exponent_bias(T))
convert(T, e % Int32)
end
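# Worked example (illustrative): 48.0 == 1.5 * 2^5 has bit pattern
# 0x4048000000000000; shifting right by the 52 significand bits leaves the raw
# exponent 0x404 == 1028, and subtracting the bias 1023 gives exponent(48.0) == 5.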
@inline Base.significand(
v::AbstractSIMD{W,T}
) where {W,T<:Union{Float32,Float64}} =
significand(v, has_feature(Val(:x86_64_avx512f)))
@inline Base.exponent(
v::AbstractSIMD{W,T}
) where {W,T<:Union{Float32,Float64}} =
exponent(v, has_feature(Val(:x86_64_avx512f)))
@inline Base.significand(v::AbstractSIMD{W,Float16}) where {W} =
significand(v, False())
@inline Base.exponent(v::AbstractSIMD{W,Float16}) where {W} =
exponent(v, False())
@inline Base.ldexp(v::AbstractSIMD, e::AbstractSIMD) =
vscalef(v, e, has_feature(Val(:x86_64_avx512f)))
# @inline function vexp2_v2(x::AbstractSIMD{8,Float64})
# x16 = 16.0*x
# # x8 = 8x
# r = vsreduce(x16, Val(0)) * 0.0625
# @fastmath begin
# m = x - r
# # m + r = x16, r ∈ (-0.5,0.5]
# # m/16 + r/16 = x, r ∈ (-1/32, 1/32]
# # we must now vreduce `mfrac`
# # return m
# end
# # expr = expm1b_kernel_4(Val(2), r)
# expr = expm1b_kernel_5(Val(2), r)
# inds = convert(UInt64, vsreduce(m, Val(1)) * 16.0)
# # inds = (reinterpret(UInt64, mfrac) >> 0x000000000000002d) & 0x000000000000000f
# # inds = (reinterpret(UInt64, mfrac) >> 0x0000000000000035) & 0x000000000000000f
# # @show r m mfrac reinterpret(UInt64, m) reinterpret(UInt64, mfrac)
# js = vpermi2pd(inds, TABLE_EXP_64_0, TABLE_EXP_64_1)
# # return r, mfrac, js
# # @show js m 16mfrac r mfrac inds%Int ((inds>>1)%Int)
# # js = 1.0
# small_part = vfmadd(js, expr, js)
# vscalef(small_part, m)#, r, mfrac, js, inds
# end
# for (func, base) in (:vexp2=>Val(2), :vexp=>Val(ℯ), :vexp10=>Val(10))
# @eval begin
# @inline function $func(x::AbstractSIMD{W,Float64}) where {W}
# N_float = muladd(x, LogBo256INV($base, Float64), MAGIC_ROUND_CONST(Float64))
# N = target_trunc(reinterpret(UInt64, N_float))
# N_float = N_float - MAGIC_ROUND_CONST(Float64)
# r = fast_fma(N_float, LogBo256U($base, Float64), x, fma_fast())
# r = fast_fma(N_float, LogBo256L($base, Float64), r, fma_fast())
# js = vload(VectorizationBase.zero_offsets(stridedpointer(J_TABLE)), (N & 0x000000ff,))
# k = N >>> 0x00000008
# small_part = vfmadd(js, expm1b_kernel($base, r), js)
# twopk = (k % UInt64) << 0x0000000000000034
# @show N_float k twopk small_part
# return small_part
# # res = reinterpret(Float64, twopk + small_part)
# # return res
# vscalef(small_part, N_float)
# end
# end
# end
# log2(x) = vgetexp(x) + log2(vgetmant12(x))
@inline function vlog_v1(
x1::VectorizationBase.AbstractSIMD{W,Float64}
) where {W} # testing an assorted mix of operations
x14 = vgetmant(x1)
x8 = vgetexp(1.3333333333333333 * x1)
# x2 = reinterpret(UInt64, x1)
# x3 = x2 >>> 0x0000000000000020
# greater_than_zero = x1 > zero(x1)
# alternative = VectorizationBase.ifelse(x1 == zero(x1), -Inf, NaN)
# isinf = x1 == Inf
# x5 = x3 + 0x0000000000095f62
# x6 = x5 >>> 0x0000000000000014
# x7 = x6 - 0x00000000000003ff
# x8 = convert(Float64, x7 % Int)
# @show x3 x5 x6 x7 x8
# x9 = x5 << 0x0000000000000020
# x10 = x9 & 0x000fffff00000000
# x11 = x10 + 0x3fe6a09e00000000
# x12 = x2 & 0x00000000ffffffff
# x13 = x11 | x12
# x14 = reinterpret(Float64, x13)
x15 = x14 - 1.0
x18 = x14 + 1.0
x16 = x15 * x15
x17 = 0.5 * x16
x19 = x15 / x18
x20 = x19 * x19
x21 = x20 * x20
x22 = vfmadd(x21, 0.15313837699209373, 0.22222198432149784)
x23 = vfmadd(x21, x22, 0.3999999999940942)
x24 = x23 * x21
x25 = vfmadd(x21, 0.14798198605116586, 0.1818357216161805)
x29 = x24 + x17
x26 = vfmadd(x21, x25, 0.2857142874366239)
x27 = vfmadd(x21, x26, 0.6666666666666735)
x28 = x27 * x20
x30 = x29 + x28
x31 = x8 * 1.9082149292705877e-10
x32 = vfmadd(x19, x30, x31)
x33 = x15 - x17
x34 = x33 + x32
x35 = vfmadd(x8, 0.6931471803691238, x34)
return x35
# @show x35
# x36 = VectorizationBase.ifelse(greater_than_zero, x35, alternative)
# VectorizationBase.ifelse(isinf, Inf, x36)
end
const LOGTABLE = [
0.0,
0.007782140442054949,
0.015504186535965254,
0.02316705928153438,
0.030771658666753687,
0.0383188643021366,
0.0458095360312942,
0.053244514518812285,
0.06062462181643484,
0.06795066190850775,
0.07522342123758753,
0.08244366921107459,
0.08961215868968714,
0.09672962645855111,
0.10379679368164356,
0.11081436634029011,
0.11778303565638346,
0.12470347850095724,
0.13157635778871926,
0.13840232285911913,
0.1451820098444979,
0.15191604202584197,
0.15860503017663857,
0.16524957289530717,
0.17185025692665923,
0.1784076574728183,
0.184922338494012,
0.19139485299962947,
0.19782574332991987,
0.2042155414286909,
0.21056476910734964,
0.21687393830061436,
0.22314355131420976,
0.22937410106484582,
0.2355660713127669,
0.24171993688714516,
0.24783616390458127,
0.25391520998096345,
0.25995752443692605,
0.26596354849713794,
0.27193371548364176,
0.2778684510034563,
0.2837681731306446,
0.28963329258304266,
0.2954642128938359,
0.3012613305781618,
0.3070250352949119,
0.3127557100038969,
0.3184537311185346,
0.324119468654212,
0.329753286372468,
0.3353555419211378,
0.3409265869705932,
0.34646676734620857,
0.3519764231571782,
0.3574558889218038,
0.3629054936893685,
0.3683255611587076,
0.37371640979358406,
0.37907835293496944,
0.38441169891033206,
0.3897167511400252,
0.394993808240869,
0.4002431641270127,
0.4054651081081644,
0.4106599249852684,
0.415827895143711,
0.42096929464412963,
0.4260843953109001,
0.4311734648183713,
0.43623676677491807,
0.4412745608048752,
0.44628710262841953,
0.45127464413945856,
0.4562374334815876,
0.46117571512217015,
0.46608972992459924,
0.470979715218791,
0.4758459048699639,
0.4806885293457519,
0.4855078157817008,
0.4903039880451938,
0.4950772667978515,
0.4998278695564493,
0.5045560107523953,
0.5092619017898079,
0.5139457511022343,
0.5186077642080457,
0.5232481437645479,
0.5278670896208424,
0.5324647988694718,
0.5370414658968836,
0.5415972824327444,
0.5461324375981357,
0.5506471179526623,
0.5551415075405016,
0.5596157879354227,
0.564070138284803,
0.5685047353526688,
0.5729197535617855,
0.5773153650348236,
0.5816917396346225,
0.5860490450035782,
0.5903874466021763,
0.5947071077466928,
0.5990081896460834,
0.6032908514380843,
0.6075552502245418,
0.6118015411059929,
0.616029877215514,
0.6202404097518576,
0.6244332880118935,
0.6286086594223741,
0.6327666695710378,
0.6369074622370692,
0.6410311794209312,
0.6451379613735847,
0.6492279466251099,
0.6533012720127457,
0.65735807270836,
0.661398482245365,
0.6654226325450905,
0.6694306539426292,
0.6734226752121667,
0.6773988235918061,
0.6813592248079031,
0.6853040030989194,
0.689233281238809,
0.6931471805599453
];
logb(::Type{Float32}, ::Val{2}) = 1.4426950408889634
logb(::Type{Float32}, ::Val{:ℯ}) = One()
logb(::Type{Float32}, ::Val{10}) = 0.4342944819032518
logbU(::Type{Float64}, ::Val{2}) = 1.4426950408889634
logbL(::Type{Float64}, ::Val{2}) = 2.0355273740931033e-17
logbU(::Type{Float64}, ::Val{:ℯ}) = One()
logbL(::Type{Float64}, ::Val{:ℯ}) = Zero()
logbU(::Type{Float64}, ::Val{10}) = 0.4342944819032518
logbL(::Type{Float64}, ::Val{10}) = 1.098319650216765e-17
@inline function vlog_base(v::AbstractSIMD{W,Float64}) where {W}
y = vgetmant12(v)
mf = vgetexp(v)
y128 = 128.0 * y
f128 = vsreduce(y128, Val(0))
F128 = y128 - f128# - 128.0
jp = convert(UInt, F128) - 0x0000000000000080 # - 128
# jp = convert(UInt,F128 - 128.0) # - 128
# jp = convert(UInt,F128) - 0x000000000000007f # - 127
hi = vload(zstridedpointer(LOGTABLE), (jp,))
# l_hi = muladd(0.6931471805601177, mf, hi)
l_hi = muladd(0.6931471805599453, mf, hi)
u = (2.0 * f128) / (y128 + F128)
# u = (2.0*f128)*vinv_fast(y128+F128)
v = u * u
q = u * v * muladd(0.012500053168098584, v, 0.08333333333303913)
## Step 4
m_hi = logbU(Float64, Val(:ℯ))
m_lo = logbL(Float64, Val(:ℯ))
return fma(m_hi, l_hi, fma(m_hi, (u + q), m_lo * l_hi))
end
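# A sketch of the reduction above (not part of the original source): with
# v = 2^mf * y and y ∈ [1, 2), split 128y = F128 + f128 with F128 integral, so
# log(v) = mf*log(2) + log(F128/128) + log(1 + f128/F128). The table term
# log(F128/128) is `hi = LOGTABLE[jp]`, while log(1 + f128/F128) = 2*atanh(u/2)
# with u = 2*f128/(y128 + F128) is approximated by the odd series
# u + u³/12 + u⁵/80, matching the coefficients 1/12 ≈ 0.0833... and
# 1/80 ≈ 0.0125... in `q`.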
const LOG_TABLE_1 = Vec((
Core.VecElement(-0.6931471805599453),
Core.VecElement(-0.6325225587435105),
Core.VecElement(-0.5753641449035618),
Core.VecElement(-0.5212969236332861),
Core.VecElement(-0.4700036292457356),
Core.VecElement(-0.42121346507630353),
Core.VecElement(-0.3746934494414107),
Core.VecElement(-0.33024168687057687)
))
const LOG_TABLE_2 = Vec((
Core.VecElement(-0.2876820724517809),
Core.VecElement(-0.24686007793152578),
Core.VecElement(-0.2076393647782445),
Core.VecElement(-0.16989903679539747),
Core.VecElement(-0.13353139262452263),
Core.VecElement(-0.09844007281325252),
Core.VecElement(-0.06453852113757118),
Core.VecElement(-0.0317486983145803)
))
const LOG2_TABLE_1 = Vec((
Core.VecElement(-1.0),
Core.VecElement(-0.9125371587496606),
Core.VecElement(-0.8300749985576876),
Core.VecElement(-0.7520724865564145),
Core.VecElement(-0.6780719051126377),
Core.VecElement(-0.6076825772212398),
Core.VecElement(-0.5405683813627028),
Core.VecElement(-0.4764380439429871)
))
const LOG2_TABLE_2 = Vec((
Core.VecElement(-0.4150374992788438),
Core.VecElement(-0.3561438102252753),
Core.VecElement(-0.2995602818589078),
Core.VecElement(-0.24511249783653147),
Core.VecElement(-0.19264507794239588),
Core.VecElement(-0.14201900487242788),
Core.VecElement(-0.09310940439148147),
Core.VecElement(-0.04580368961312479)
))
@inline function logkern_5(x)
# c5 = 1.442695040910480151153610022148007414933621426294604120324193438507895065110221
# c4 = -0.72134752044717115185862113438227555323895953631804827391698847566536447158971
# c3 = 0.4808981720904872241142144122992626169360496403735493070672998980156544684558863
# c2 = -0.360673694648970133332498335009073996964643657971752173300330580172269152150781
# c1 = 0.288894281140228357047827707854224408808934973997456175085011693761970856737952
# c0 = -0.2406712024835816804281715317202731627491220621210104128295077305793044245650939
# c5 = 1.000000000004163234554825918991486005405540498226964649982446185635197444806964
# c4 = -0.5000000000218648309257667907050168790946684146132007063399540958947352513227305
# c3 = 0.3333332566080562266977844691232384812586343066695585749657855454332169568535108
# c2 = -0.2499998582263749067900056202432438929369945607183640079498376650719435347943278
# c1 = 0.2002094490921391961924703669397489236496809044839667749701044680362284866088264
# c0 = -0.1669110816794161712236614512036245351950242871767130610770379611369082994542161
c5 =
1.442695040894969685206229605569847427762291596137745860941223179497930808392396
c4 =
-0.7213475204760259868264417322489103259481854442059435485772582433076339753304261
c3 =
0.4808982362718110098786870255779326384270826960362097549483653590161925169305216
c2 =
-0.3606735556861350010148197635190486357722791582949450655689225850005157925747294
c1 =
0.2888411793443405953870653225945083997249994212173033378047806528684724370915621
c0 =
-0.2408017898083064242160316054616972180231905999993934707245482866675760802361285
x * vmuladd_fast(
vmuladd_fast(
vmuladd_fast(vmuladd_fast(vmuladd_fast(c0, x, c1), x, c2), x, c3),
x,
c4
),
x,
c5
)
end
@inline function logkern_8(x)
c8 =
1.442695040888963407816086035203962841602383176205260263758551036257573600888366
c7 =
-0.7213475204444770815507673261267005370242876005369959065788590988093119140411675
c6 =
0.4808983469629686528945376971132664305807537133089662498292066034260696561965772
c5 =
-0.360673760285340441741375667112629490399269780822657391947176916709523570522447
c4 =
0.2885390083116887609918806358733183651489107200432558827730599551390737454696275
c3 =
-0.2404489409194524614303773871505793252012695382714584724417731408820873961880021
c2 =
0.20609895474146851962537542011143008396458423224465328706920087552924788983302
c1 =
-0.180654273148821532237028827440264938703627661154871403363931834846318112573294
c0 =
0.1606521926313221723948643034905571707676419356400609690156705532267686492918868
x * vmuladd_fast(
vmuladd_fast(
vmuladd_fast(
vmuladd_fast(
vmuladd_fast(
vmuladd_fast(vmuladd_fast(vmuladd_fast(c0, x, c1), x, c2), x, c3),
x,
c4
),
x,
c5
),
x,
c6
),
x,
c7
),
x,
c8
)
end
@inline function logkern_9(x)
c9 =
1.442695040888963407587958876357516873223860332904434711906078224014586198339343
c8 =
-0.7213475204444817057711203479236040464424888160067823280588699959470059217833771
c7 =
0.480898346962976128170017519528240603877475137860740135494448418612334521964808
c6 =
-0.3606737602222044528926485178925468152698544632914287402177742224438034341355266
c5 =
0.2885390082734153359755295484533201676037233143490110170185338945645707433116462
c4 =
-0.2404491736638805517094434611266213341107664043116792359649609367170386448753448
c3 =
0.2060990174486639706727491347594731948792605354335332681880496880277396108033051
c2 =
-0.1803364995021978052782243890647317433194251060644842283875753942662516598663569
c1 =
0.1606200865414345528500280443621751155379142697854250695344975262087060612426921
c0 =
-0.1446222903636375678552838798467050326611974502493221959482776774515654211467748
x * vmuladd_fast(
vmuladd_fast(
vmuladd_fast(
vmuladd_fast(
vmuladd_fast(
vmuladd_fast(
vmuladd_fast(vmuladd_fast(vmuladd_fast(c0, x, c1), x, c2), x, c3),
x,
c4
),
x,
c5
),
x,
c6
),
x,
c7
),
x,
c8
),
x,
c9
)
end
@inline function logkern_7(x)
c7 =
1.442695040888962266311320415441181965256745101864924017704730474290825179713386
c6 =
-0.7213475204444734849561916107798427186725025515630934214552341603017790847666743
c5 =
0.4808983470003734483568608991659360631657097156445816019788134536213757250523677
c4 =
-0.3606737603148037624174654830835163329225507561910479104865837838596482628691627
c3 =
0.2885388167992364548202154388340846682584745440281798969580530860342799719518947
c2 =
-0.2404488805785497044335968015014331102915764909299868970339917618519049206072763
c1 =
0.2064127286335890836287774329056506792116396261746810041599847620260037837288407
c0 =
-0.1806895812056507095939337124225687685035687284931465242170306355644855623118632
# c7 = 1.442695040888962266311320415440673226691509089399247319200433080789551133947874
# c6 = -0.7213475204444734849561916106463536879222835723990934589483454213207143172699565
# c5 = 0.4808983470003734483568609116686822486155905301580259810441289744019151890859293
# c4 = -0.3606737603148037624174668433501054879718352818773442452949795967039102009618056
# c3 = 0.2885388167992364548201727628199776711909314676794911979761485405529591339257216
# c2 = -0.2404488805785497044302647238681858349839732924674030562755638231437855429968676
# c1 = 0.2064127286335890836637376093489490181845837682333985654448331246682934490703086
# c0 = -0.1806895812056507118633103913062383463992924441916921974716424429045368328209497
x * vmuladd_fast(
vmuladd_fast(
vmuladd_fast(
vmuladd_fast(
vmuladd_fast(vmuladd_fast(vmuladd_fast(c0, x, c1), x, c2), x, c3),
x,
c4
),
x,
c5
),
x,
c6
),
x,
c7
)
end
@inline function logkern_6(x)
c6 =
1.44269504088896112481574913926580271258562668309654298701097420877374680031478
c5 =
-0.7213475204628791729494574154147857065200754915843724028524643433219030089552769
c4 =
0.4808983470214135184345854322580473442742138878734777669129377480116915710051349
c3 =
-0.3606736095355376179878548883602333628571188158521650924319649055538192403728833
c2 =
0.2885387593457860540619732799086814986482101737149964973666904711488634555687257
c1 =
-0.2407576763559816924463087687841796294903125418141104155734727426854999960321288
c0 =
0.2064519502023243513328580164119739425968179159770070826936661162470222233739379
x * vmuladd_fast(
vmuladd_fast(
vmuladd_fast(
vmuladd_fast(vmuladd_fast(vmuladd_fast(c0, x, c1), x, c2), x, c3),
x,
c4
),
x,
c5
),
x,
c6
)
end
# Probably the best
@inline function vlog2_fast(a::AbstractSIMD{8,Float64})
# log2(vgetmant(a, Val(8))) + vgetexp(a) == log2(a)
m = vgetmant(a, Val(8)) # m ∈ [1,2)
e = vgetexp(a)
# m ∈ [1,2); We reduce further...
y = inv_approx(m) # y ∈ (0.5,1]
# return y
y₀ = vroundscale(y, Val(80)) # 80 = 16*(1+4)
# y₀ is a multiple of 1/32, one of 0.5:(1/32):1.0
# let x = m*y₀ and r = x - 1, so that
# r + 1 = m*y₀;
# log2(r+1) = log2(m) + log2(y₀)
# log2(m) = log2(1+r) - log2(y₀)
r = vfmsub(m, y₀, 1.0) # m * y - 1
log1pr = logkern_5(r)
# log1pr = logkern_6(r)
# log1pr = logkern_7(r)
# log1pr = logkern_9(r)
# @show r log1pr m
# log1pr = logkern_8(r)
y₀notone = y₀ ≠ 1.0
inds = (reinterpret(UInt, y₀) >>> 48) & 0x0f
# @show y₀ inds r
# logy₀ = vpermi2pd(inds, LOG2_TABLE_1, LOG2_TABLE_2)
logy₀ = vpermi2pd(inds, LOG2_TABLE_1, LOG2_TABLE_2)
# emlogy₀ = e - logy₀
emlogy₀ = ifelse(y₀notone, e - logy₀, e)
log1pr + emlogy₀
# logm = ifelse(y₀notone, log1pr - logy₀, log1pr)
# @show r y₀ e
# return r, m, y₀, logy₀, e
# logkern_5(r) - logy₀ + e
# logm = ifelse(y₀isone, log1pr, log1pr - logy₀)
# log1pr += e
# logm = ifelse(y₀notone, log1pr - logy₀, log1pr)
# @show logm log1pr logy₀ e
# logm + e
end
@inline vlog_fast(x::T) where {T} =
vlog2_fast(x) * convert(T, 0.6931471805599453)
@inline vlog10_fast(x::T) where {T} =
vlog2_fast(x) * convert(T, 0.3010299956639812)
# @inline Base.FastMath.log_fast(v::AbstractSIMD) = vlog_fast(float(v))
# @inline Base.FastMath.log2_fast(v::AbstractSIMD) = vlog2_fast(float(v))
# @inline Base.FastMath.log10_fast(v::AbstractSIMD) = vlog10_fast(float(v))
@inline function log2_kern_5_256(x)
c5 =
1.442695040888963430242018860033667730529246266516984575206349078995811595149434
c4 =
-0.7213475204444818238116304451613321925305725926194819917643836452178866375548279
c3 =
0.480898346935995048804761640936874918715608085587068841980211938177173076051027
c2 =
-0.3606737601723789903264415189400867418476075783130968675245051405508304609911771
c1 =
0.2885437254815682617764304037319501313717679206671517967342968352103864725323379
c0 =
-0.2404546770228689226321452773936263107841762894627528631584290286000941540854746
x * vmuladd_fast(
vmuladd_fast(
vmuladd_fast(vmuladd_fast(vmuladd_fast(c0, x, c1), x, c2), x, c3),
x,
c4
),
x,
c5
)
end
const LOG2_TABLE_128 =
Float64[log2(x) for x ∈ range(big"0.5"; step = 1 / 256, length = 128)]
@inline function vlog2_v2(a::AbstractSIMD{W,Float64}) where {W}
# log2(vgetmant(a, Val(8))) + vgetexp(a) == log2(a)
m = vgetmant(a, Val(8)) # m ∈ [1,2)
e = vgetexp(a)
# m ∈ [1,2); We reduce further...
y = inv_approx(m) # y ∈ (0.5,1]
# return y
y₀ = vroundscale(y, Val(128)) # 128 = 16*(1+7)
# y₀ is a multiple of 1/256, one of 0.5:(1/256):(0.5 + 127/256)
# r + 1 = m*y;
# log(r+1) = log(m) + log(y₀)
# log(m) = log(1+r) - log(y₀)
r = vfmsub(m, y₀, 1.0)
# log1pr = logkern_5(r)
log1pr = log2_kern_5_256(r)
y₀notone = y₀ ≠ 1.0
inds = (reinterpret(Int, y₀) >> 45) & 0xff
# inds = (reinterpret(UInt, y₀) >>> 48) & 0x0f
# @show y₀ inds r
# logy₀ = vpermi2pd(inds, LOG2_TABLE_1, LOG2_TABLE_2)
logy₀ = vload(zstridedpointer(LOG2_TABLE_128), (inds,))
emlogy₀ = ifelse(y₀notone, e - logy₀, e)
log1pr + emlogy₀
end
# @inline function Base.log(x1::AbstractSIMD{W,Float64}) where {W}
# @inline function vlog(x1::Float64)
# @inline Base.log(v::AbstractSIMD) = log(float(v))
# @inline function vlog_fast(x1::AbstractSIMD{W,Float32}) where {W}
# notzero = x1 != zero(x1)
# greater_than_zero = x1 > zero(x1)
# x3 = x1 < 1.1754944f-38#3.4332275f-5
# # x6 = true if x3 entirely false
# x7 = x1 * 8.388608f6#14.0f0
# x8 = ifelse(x3, x7, x1)
# x10 = reinterpret(UInt32, x8)
# x11 = x10 + 0x004afb0d
# x12 = x11 >>> 0x00000017
# x13 = ifelse(x3, 0xffffff6a, 0xffffff81)
# x15 = x12 + x13
# x16 = x11 & 0x007fffff
# x17 = x16 + 0x3f3504f3
# x18 = reinterpret(Float32, x17)
# x19 = x18 - 1f0
# x20 = x18 + 1f0
# x21 = x19 / x20
# x22 = x21 * x21
# x23 = x22 * x22
# x24 = vfmadd(x23, 0.24279079f0, 0.40000972f0)
# x25 = x24 * x23
# x26 = vfmadd(x23, 0.6666666f0, 0.6666666f0)
# x27 = x26 * x22
# x28 = x19 * x19
# x29 = x28 * 5f-1
# x30 = convert(Float32, x15 % Int32)
# x31 = x27 + x29
# x32 = x31 + x25
# x33 = x30 * -0.00021219444f0
# x34 = vfmadd(x21, x32, x33)
# x35 = x19 - x29
# x36 = x35 + x34
# x37 = vfmadd(x30, 0.6933594f0, x36)
# # x37 = vfmadd(x30, 0.6931472f0, x36)
# x39 = ifelse(x1 == Inf32, Inf32, x37)
# x40 = ifelse(notzero, x39, -Inf32)
# ifelse(x1 < zero(x1), NaN32, x40)
# end
# @inline Base.FastMath.log_fast(v::AbstractSIMD) = vlog_fast(float(v))
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 7372 | @inline function pow_by_square(_v, e::IntegerTypesHW)
v = float(_v)
if e < 0
v = y = inv(v)
e = -e
elseif e == 0
return one(v)
else
y = v
end
tz = trailing_zeros(e)
e >>= (tz + one(tz))
while tz > zero(tz)
y = Base.FastMath.mul_fast(y, y)
tz -= one(tz)
end
x = y
while e ≠ zero(e)
y = Base.FastMath.mul_fast(y, y)
tz = trailing_zeros(e)
e >>= (tz + one(tz))
while tz > zero(tz)
y = Base.FastMath.mul_fast(y, y)
tz -= one(tz)
end
x = Base.FastMath.mul_fast(x, y)
end
return x
end
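# Worked example (a sketch, not part of the original source):
# pow_by_square(2.0, 10) proceeds as y = 2.0, tz = 1 so y -> 4.0 and x = 4.0
# (= v^2); the remaining set bit gives y -> 16.0 -> 256.0 (= v^8) and
# x = 4.0 * 256.0 = 1024.0 == 2.0^10. Each set bit of `e` contributes one
# repeated-squaring factor to `x`.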
@generated function pow_by_square(_v, ::StaticInt{E}) where {E}
e = E
q = Expr(:block, Expr(:meta, :inline), :(v = float(_v)))
xdefined = false
if e < 0
push!(q.args, :(v = y = inv(v)))
e = -e
else
push!(q.args, :(y = v))
end
mf = Base.FastMath.mul_fast
while e ≠ zero(e)
xdefined && push!(q.args, :(y = $mf(y, y)))
tz = trailing_zeros(e)
e >>= (tz + one(tz))
while tz > zero(tz)
push!(q.args, :(y = $mf(y, y)))
tz -= one(tz)
end
if xdefined
push!(q.args, :(x = $mf(x, y)))
else
xdefined = true
push!(q.args, :(x = y))
end
end
push!(q.args, :x)
return q
end
# 5 = 101 = 2^2 + 2^0 # x^4 * x^1
# x^5 = x^4 * x
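# For instance (a sketch of the generated code, assuming E = 5):
#   y = v; x = y           # consume bit 0 of 0b101
#   y = y * y; y = y * y   # y == v^4
#   x = x * y              # x == v^5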
@inline Base.:^(v::AbstractSIMD{W,T}, i::IntegerTypesHW) where {W,T} =
pow_by_square(v, i)
@inline Base.:^(
v::AbstractSIMD{W,T},
i::IntegerTypesHW
) where {W,T<:Union{Float32,Float64}} = pow_by_square(v, i)
@inline Base.:^(v::AbstractSIMD, ::StaticInt{N}) where {N} =
pow_by_square(v, StaticInt{N}())
@inline Base.FastMath.pow_fast(v::AbstractSIMD, ::StaticInt{N}) where {N} =
pow_by_square(v, StaticInt{N}())
@inline Base.FastMath.pow_fast(
v::AbstractSIMD{W,T},
i::IntegerTypesHW
) where {W,T} = pow_by_square(v, i)
@inline Base.FastMath.pow_fast(
v::AbstractSIMD{W,T},
i::IntegerTypesHW
) where {W,T<:Union{Float32,Float64}} = pow_by_square(v, i)
@inline Base.FastMath.pow_fast(v::AbstractSIMD, x::FloatingTypes) =
exp2(Base.FastMath.log2_fast(v) * x)
@inline Base.FastMath.pow_fast(v::FloatingTypes, x::AbstractSIMD) =
exp2(Base.FastMath.log2_fast(v) * x)
@inline Base.FastMath.pow_fast(v::AbstractSIMD, x::AbstractSIMD) =
exp2(Base.FastMath.log2_fast(v) * x)
@inline Base.literal_pow(::typeof(^), x::AbstractSIMD, ::Val{N}) where {N} =
pow_by_square(x, StaticInt(N))
# @inline relu(x) = (y = zero(x); ifelse(x > y, x, y))
@inline relu(x) = (y = zero(x); ifelse(x < y, y, x))
Base.sign(v::AbstractSIMD) = ifelse(v > 0, one(v), -one(v))
@inline Base.fld(x::AbstractSIMD, y::AbstractSIMD) =
div(promote_div(x, y)..., RoundDown)
@inline Base.fld(x::AbstractSIMD, y::Real) =
div(promote_div(x, y)..., RoundDown)
@inline Base.fld(x::Real, y::AbstractSIMD) =
div(promote_div(x, y)..., RoundDown)
@inline function Base.div(
x::AbstractSIMD{W,T},
y::AbstractSIMD{W,T},
::RoundingMode{:Down}
) where {W,T<:IntegerTypes}
d = div(x, y)
d - (signbit(x ⊻ y) & (d * y != x))
end
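# For example (a sketch): fld(-7, 2) computes the truncating div d = -3;
# signbit(-7 ⊻ 2) is true and d*y == -6 ≠ -7, so the result is corrected
# to d - 1 = -4, the floor of -3.5.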
@inline Base.mod(
x::AbstractSIMD{W,T},
y::AbstractSIMD{W,T}
) where {W,T<:IntegerTypes} = ifelse(y == -1, zero(x), x - fld(x, y) * y)
@inline Base.mod(
x::AbstractSIMD{W,T},
y::AbstractSIMD{W,T}
) where {W,T<:Unsigned} = rem(x, y)
@inline function Base.mod(
x::AbstractSIMD{W,T1},
y::T2
) where {W,T1<:SignedHW,T2<:UnsignedHW}
_x, _y = promote_div(x, y)
unsigned(mod(_x, _y))
end
@inline function Base.mod(
x::AbstractSIMD{W,T1},
y::T2
) where {W,T1<:UnsignedHW,T2<:SignedHW}
_x, _y = promote_div(x, y)
signed(mod(_x, _y))
end
@inline function Base.mod(
x::AbstractSIMD{W,T1},
y::AbstractSIMD{W,T2}
) where {W,T1<:SignedHW,T2<:UnsignedHW}
_x, _y = promote_div(x, y)
unsigned(mod(_x, _y))
end
@inline function Base.mod(
x::AbstractSIMD{W,T1},
y::AbstractSIMD{W,T2}
) where {W,T1<:UnsignedHW,T2<:SignedHW}
_x, _y = promote_div(x, y)
signed(mod(_x, _y))
end
@inline Base.mod(
i::AbstractSIMD{<:Any,<:IntegerTypes},
r::AbstractUnitRange{<:IntegerTypes}
) = mod(i - first(r), length(r)) + first(r)
@inline Base.mod(x::AbstractSIMD, y::NativeTypes) = mod(promote_div(x, y)...)
@inline Base.mod(x::NativeTypes, y::AbstractSIMD) = mod(promote_div(x, y)...)
# avoid ambiguity with clamp(::Missing, lo, hi) in Base.Math at math.jl:1258
# but who knows what would happen if you called it
for (X, L, H) in Iterators.product(fill([:Any, :Missing, :AbstractSIMD], 3)...)
any(==(:AbstractSIMD), (X, L, H)) || continue
@eval @inline function Base.clamp(x::$X, lo::$L, hi::$H)
x_, lo_, hi_ = promote(x, lo, hi)
ifelse(x_ > hi_, hi_, ifelse(x_ < lo_, lo_, x_))
end
end
@inline Base.FastMath.hypot_fast(x::AbstractSIMD, y::AbstractSIMD) = sqrt(
Base.FastMath.add_fast(
Base.FastMath.mul_fast(x, x),
Base.FastMath.mul_fast(y, y)
)
)
@inline Base.clamp(
x::AbstractSIMD{<:Any,<:IntegerTypes},
r::AbstractUnitRange{<:IntegerTypes}
) = clamp(x, first(r), last(r))
@inline function Base.gcd(
a::AbstractSIMDVector{W,I},
b::AbstractSIMDVector{W,I}
) where {W,I<:Base.HWReal}
aiszero = a == zero(a)
biszero = b == zero(b)
absa = abs(a)
absb = abs(b)
za = trailing_zeros(a)
zb = ifelse(biszero, zero(b), trailing_zeros(b))
k = min(za, zb)
u = unsigned(ifelse(biszero, zero(a), abs(a >> za)))
v = unsigned(ifelse(aiszero, zero(b), abs(b >> zb)))
ne = u ≠ v
while vany(ne)
ulev = (u > v) & ne
t = u
u = ifelse(ulev, v, u)
v = ifelse(ulev, t, v)
d = v - u
v = ifelse(ne, d >> trailing_zeros(d), v)
ne = u ≠ v
end
ifelse(aiszero, absb, ifelse(biszero, absa, (u << k) % I))
end
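# The loop above is a vectorized binary (Stein) GCD: shared powers of two are
# factored out with `trailing_zeros`, the subtract-and-shift step is masked
# per lane with `ifelse` and repeats until every lane converges (`vany(ne)`
# is false), and `u << k` restores the common factor 2^k. For example
# (a sketch), gcd over lanes (12, 18) and (8, 27) yields (4, 9).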
@inline Base.gcd(a::VecUnroll, b::Real) = VecUnroll(fmap(gcd, data(a), b))
@inline Base.gcd(a::Real, b::VecUnroll) = VecUnroll(fmap(gcd, a, data(b)))
@inline Base.gcd(a::VecUnroll, b::VecUnroll) =
VecUnroll(fmap(gcd, data(a), data(b)))
@inline function Base.lcm(a::AbstractSIMD, b::AbstractSIMD)
z = zero(a)
isz = (a == z) | (b == z)
ifelse(isz, z, (b ÷ gcd(b, a)) * a)
end
@inline Base.lcm(a::AbstractSIMD, b::Real) = ((c, d) = promote(a, b); lcm(c, d))
@inline Base.lcm(a::Real, b::AbstractSIMD) = ((c, d) = promote(a, b); lcm(c, d))
@inline function Base.getindex(
A::Array,
i::AbstractSIMD,
j::Vararg{AbstractSIMD,K}
) where {K}
vload(stridedpointer(A), (i, j...))
end
@inline Base.Sort.midpoint(
lo::AbstractSIMDVector{W,I},
hi::AbstractSIMDVector{W,I}
) where {W,I<:Integer} = lo + ((hi - lo) >>> 0x01)
for TType in [:Integer, :(AbstractSIMDVector{W,<:Integer})]
@eval begin
@inline function Base.searchsortedlast(
v::Array,
x::AbstractSIMDVector{W,I},
lo::T,
hi::T,
o::Base.Ordering
) where {W,I,T<:$TType}
u = convert(T, typeof(x)(1))
lo = lo - u
hi = hi + u
st = lo < hi - u
@inbounds while vany(st)
m = Base.Sort.midpoint(lo, hi)
b = Base.Order.lt(o, x, v[m]) & st
hi = ifelse(b, m, hi)
lo = ifelse(b, lo, m)
st = lo < hi - u
end
return lo
end
end
end
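# A sketch of the approach above: the binary search runs in lockstep across
# SIMD lanes; each lane's `lo`/`hi` window is narrowed through masked
# `ifelse` selections, and the loop terminates once `vany(st)` reports that
# every lane's window has closed.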
@inline function Base.searchsortedlast(
v::Array,
x::VecUnroll,
lo::T,
hi::T,
o::Base.Ordering
) where {T<:Integer}
VecUnroll(fmap(searchsortedlast, v, data(x), lo, hi, o))
end
@inline function Base.searchsortedlast(
v::Array,
x::VecUnroll,
lo::VecUnroll,
hi::VecUnroll,
o::Base.Ordering
)
VecUnroll(fmap(searchsortedlast, v, data(x), data(lo), data(hi), o))
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 8908 | # Copyright (c) 2016, Johan Mabille, Sylvain Corlay, Wolf Vollprecht and Martin Renou
# Copyright (c) 2016, QuantStack
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@inline integer(v::AbstractSIMD{W,Float64}) where {W} =
_integer(v, has_feature(Val(:x86_64_avx512dq)))
@inline _integer(v::AbstractSIMD{W,Float64}, ::True) where {W} =
vconvert(Int64, v)
@inline _integer(v::AbstractSIMD{W,Float64}, ::False) where {W} =
vconvert(Int32, v)
@inline function erf_kernel_l9(x::Union{Float64,AbstractSIMD{<:Any,Float64}})
y = vfmadd(x, 6.49254556481904e-5, 0.00120339380863079)
z = vfmadd(x, 0.000364915280629351, 0.00849717371168693)
y = vfmadd(x, y, 0.0403259488531795)
z = vfmadd(x, z, 0.0869936222615386)
y = vfmadd(x, y, 0.135894887627278)
z = vfmadd(x, z, 0.453767041780003)
y = vfmadd(x, y, 1.12837916709551)
z = vfmadd(x, z, 1.0)
res = y / z
res + vfnmadd(
8.13035732583580548119176214095147680242299108680658322550212199643608432312984e-16,
x,
2.679870718713541577762315771546743935213688171443421284936882991116060314650674e-15
)
# Base.FastMath.div_fast(w * y, z)
end
# erf(x)/x = erf_poly_l9(x*x) # r = x^2; x = sqrt(r)
# erf(sqrt(r))/sqrt(r) = erf_poly_l9(r)
# x in 0...0.65
# r in 0...0.65^2
# using Remez, SpecialFunctions
# erftestsmall(x) = (r = sqrt(x); erf(r)/r)
# ncinit = BigFloat[1.12837916709551, 0.135894887627278, 0.0403259488531795, 0.00120339380863079, 6.49254556481904e-5];
# dcinit = BigFloat[1.0, 0.453767041780003, 0.0869936222615386, 0.00849717371168693, 0.000364915280629351];
# N,D,E,X = ratfn_minimax(erftestsmall, [big"1e-70", big"0.65"^2], 4, 4, (w,x)->BigFloat(1), ncinit, dcinit); @show(E); N
vscalef(b::Bool, x, y, z) = b ? (x * (2^y)) : z
@inline function erf_v56f_kernel(v3f, v4f)
v29f = v4f * -1.4426950408889634
v30f = round(v29f)
v31f = vfmsub(v30f, -0.6931471803691238, v4f)
v32f = v30f * 1.9082149292705877e-10
v33f = v31f - v32f
v34f = v33f * v33f
v35f = vfmadd(v34f, 4.1381367970572385e-8, -1.6533902205465252e-6)
v36f = vfmadd(v34f, v35f, 6.613756321437934e-5)
v37f = vfmadd(v34f, v36f, -0.0027777777777015593)
v39f = vfnmsub(v34f, v37f, 0.16666666666666602)
v40f = vfmadd(v34f, v39f, v33f)
v41f = v40f * v33f
v42f = 2.0 - v40f
v43f = Base.FastMath.div_fast(v41f, v42f)
v44f = 1.0 - v32f
v45f = v44f + v31f
v46f = v45f + v43f
m47 = v4f ≤ 708.3964185322641
v54f = vscalef(m47, v46f, v30f, zero(v3f))
# v56f = ifelse(v4f < 709.782712893384, v54f, Inf) # TODO: NaN should return v54f
end
@inline function erf_v97f_kernel(v3f)
v91f = vfmadd(v3f, 0.0400072964526861, 0.278788439273629)
v86f = vfmadd(v3f, 0.0225716982919218, 0.157289620742839)
v92f = vfmadd(v3f, v91f, 1.05074004614827)
v87f = vfmadd(v3f, v86f, 0.581528574177741)
v93f = vfmadd(v3f, v92f, 2.38574194785344)
v88f = vfmadd(v3f, v87f, 1.26739901455873)
v94f = vfmadd(v3f, v93f, 3.37367334657285)
v89f = vfmadd(v3f, v88f, 1.62356584489367)
v95f = vfmadd(v3f, v94f, 2.75143870676376)
v90f = vfmadd(v3f, v89f, 0.99992114009714)
v96f = vfmadd(v3f, v95f, 1.0)
v97f = v90f / v96f
end
@inline function erf_v71_kernel(v3f)
v65f = vfmadd(v3f, 0.0125304936549413, 0.126579413030178)
v60f = vfmadd(v3f, 0.00706940843763253, 0.0714193832506776)
v66f = vfmadd(v3f, v65f, 0.594651311286482)
v61f = vfmadd(v3f, v60f, 0.331899559578213)
v67f = vfmadd(v3f, v66f, 1.61876655543871)
v62f = vfmadd(v3f, v61f, 0.878115804155882)
v68f = vfmadd(v3f, v67f, 2.65383972869776)
v63f = vfmadd(v3f, v62f, 1.33154163936765)
v69f = vfmadd(v3f, v68f, 2.45992070144246)
v64f = vfmadd(v3f, v63f, 0.999999992049799)
v70f = vfmadd(v3f, v69f, 1.0)
v64f, v70f
end
@inline function verf(v0f::Union{Float64,AbstractSIMD{<:Any,Float64}})
# v1 = reinterpret(UInt64, v)
# v2 = v1 & 0x7fffffffffffffff
# v3 = reinterpret(Float64, v2)
v3f = abs(v0f)
v4f = v0f * v0f
# m6 = v3f < 0.65
# m6 = v3f < 0.68
m6 = v3f < 0.675
if vany(collapse_or(m6))
v19f = v0f * erf_kernel_l9(v4f)
vall(collapse_and(m6)) && return v19f
else
# v19f = zero(v0f)
v19f = _vundef(v0f)
end
m23 = v3f < 2.725
v56f = erf_v56f_kernel(v3f, v4f)
if vany(collapse_or(m23 & (~m6))) # any(0.675 < v3f < 2.2) # l58
v64f, v70f = erf_v71_kernel(v3f)
v71f = Base.FastMath.div_fast(v56f, v70f)
v73f = vfnmadd(v71f, v64f, 1.0)
v78f = copysign(v73f, v0f)
v84f = ifelse(m6, v19f, v78f)
vall(collapse_and(m23)) && return v84f
else
v84f = v19f
end
# l83 # > 2.2
# v91f = vfmadd(v3f, 0.0400072964526861, 0.278788439273629)
# v86f = vfmadd(v3f, 0.0225716982919218, 0.157289620742839)
# v92f = vfmadd(v3f, v91f, 1.05074004614827)
# v87f = vfmadd(v3f, v86f, 0.581528574177741)
# v93f = vfmadd(v3f, v92f, 2.38574194785344)
# v88f = vfmadd(v3f, v87f, 1.26739901455873)
# v94f = vfmadd(v3f, v93f, 3.37367334657285)
# v89f = vfmadd(v3f, v88f, 1.62356584489367)
# v95f = vfmadd(v3f, v94f, 2.75143870676376)
# v90f = vfmadd(v3f, v89f, 0.99992114009714)
# v96f = vfmadd(v3f, v95f, 1.0)
# # v97f = v56f * v90f
# # v98f = Base.FastMath.div_fast(v97f, v96f)
# v97f = v90f / v96f
v97f = erf_v97f_kernel(v3f)
v99f = vfnmadd(v97f, v56f, 1.0)
# v99f = sub_ieee(1.0, v98f)
v104f = copysign(v99f, v0f)
v105f = ifelse(m23, v84f, v104f)
return v105f
end
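# Branch summary for the Float64 path above (a sketch): |x| < 0.675 uses the
# odd polynomial x * P(x²) from `erf_kernel_l9`; 0.675 ≤ |x| < 2.725 uses
# 1 - exp(-x²) * R₁(|x|) via `erf_v56f_kernel` and `erf_v71_kernel`; larger
# |x| uses the second rational kernel `erf_v97f_kernel`, with `copysign`
# restoring the argument's sign in both tails.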
# # erftest(x) = (1 - erf(x)) / VectorizationBase.erf_v56f_kernel(abs(x),x*x)
# # N,D,E,X = ratfn_minimax(erftest, [big"2.2", big"5.9215871960"], 5, 6); @show(E); N
# erf(2.3) = 1.0 - v98f
# (1.0 - erf(2.3) / v56f(2.3)) = v97f
# (1.0 - erf(2.3) / v56f(2.3)) = v90f / v96f
# rational polynomial, degree 5 / degree 6
@inline function verf(v0f::Union{Float32,AbstractSIMD{<:Any,Float32}})
v3f = abs(v0f)
m4 = v3f < 0.6666667f0
v8f = v3f * v3f
if vany(collapse_or(m4)) # L7
v9f = vfmadd(v8f, -0.00060952053f0, 0.005013293f0)
v10f = vfmadd(v8f, v9f, -0.026780106f0)
v11f = vfmadd(v8f, v10f, 0.11282183f0)
v12f = vfmadd(v8f, v11f, -0.37612528f0)
v13f = vfmadd(v8f, v12f, 1.1283791f0)
v17f = v13f * v0f
vall(collapse_and(m4)) && return v17f
else
v17f = _vundef(v0f)
end
v18f = v3f + 1.0f0
v19f = Base.FastMath.div_fast(v3f, v18f)
v20f = v19f - 0.4f0
v23f = -1.442695f0 * v8f
v24f = round(v23f)
v25f = vfmsub(v24f, -0.6933594f0, v8f)
v26f = vfmadd(v24f, 0.00021219444f0, v25f)
v27f = vfmadd(v26f, 0.0013997796f0, 0.008395563f0)
v28f = vfmadd(v26f, v27f, 0.0416654f0)
v31f = v26f * v26f
v29f = vfmadd(v26f, v28f, 0.16666277f0)
v30f = vfmadd(v26f, v29f, 0.5f0)
v32f = vfmadd(v30f, v31f, v26f)
v33f = v32f + 1.0f0
m34 = v8f ≤ 88.37626f0
v42f = vscalef(m34, v33f, v24f, zero(v0f))
v43f = vfmadd(v20f, -2.6283f0, 6.702909f0)
v44f = vfmadd(v20f, v43f, -6.4094872f0)
v45f = vfmadd(v20f, v44f, 3.2607658f0)
v46f = vfmadd(v20f, v45f, -1.364514f0)
v47f = vfmadd(v20f, v46f, 0.15627646f0)
v48f = vfmadd(v20f, v47f, 0.14205085f0)
v49f = vfmadd(v20f, v48f, 0.38435692f0)
v50f = vfmadd(v20f, v49f, 0.16037047f0)
v51f = vfmadd(v20f, v50f, -1.1370356f0)
v52f = vfmadd(v20f, v51f, 0.5392844f0)
v53f = v52f * v42f
v54f = 1.0f0 - v53f
m55 = v0f < zero(v0f)
v56f = -v54f
v57f = ifelse(m55, v56f, v54f)
v58f = ifelse(m4, v17f, v57f)
m59 = v3f ≠ Inf32
m60 = v0f > zero(v0f)
v61f = ifelse(m60, one(v0f), zero(v0f))
v62f = ifelse(m55, one(v0f), zero(v0f))
v63f = v61f - v62f
v65f = ifelse(v0f == v0f, v63f, NaN32)
v66f = ifelse(m59, v58f, v65f)
return v66f
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 1246 |
# Overloadable method, e.g., to insert OffsetPrecalc's precalculated stride multiples
@inline tdot(ptr::AbstractStridedPointer, ::Tuple{}, ::Tuple{}) =
(pointer(ptr), Zero())
@inline tdot(ptr::AbstractStridedPointer{T}, a, b) where {T} =
tdot(pointer(ptr), a, b)
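# Conceptually (a sketch): `tdot(p, (i, j), (sx, sy))` reduces to
# `(pointer, lazymul(i, sx) + lazymul(j, sy))`, i.e. the base pointer paired
# with the lazily combined inner product of indices and strides; the
# three-argument forms thread precalculated stride multiples through.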
@inline function tdot(p::Ptr{T}, a::Tuple{A}, b::Tuple{B,Vararg}) where {T,A,B}
p, lazymul(first(a), first(b))
end
@inline function tdot(
p::Ptr{T},
a::Tuple{A},
b::Tuple{B,Vararg},
c::Tuple{C,Vararg}
) where {T,A,B,C}
p, lazymul(first(a), first(b), first(c))
end
@inline function tdot(
p::Ptr{T},
a::Tuple{A1,A2,Vararg},
b::Tuple{B1,B2,Vararg}
) where {T,A1,A2,B1,B2}
i = lazymul(first(a), first(b))
p, j = tdot(p, tail(a), tail(b))
add_indices(p, i, j)
end
@inline function tdot(
p::Ptr{T},
a::Tuple{A1,A2,Vararg},
b::Tuple{B1,B2,Vararg},
c::Tuple{C1,C2,Vararg}
) where {T,A1,A2,B1,B2,C1,C2}
i = lazymul(first(a), first(b), first(c))
p, j = tdot(p, tail(a), tail(b), tail(c))
add_indices(p, i, j)
end
@inline function tdot(
p::Ptr{T},
a::Tuple{A1,A2,Vararg},
b::Tuple{B1,B2,Vararg},
c::Tuple{C1}
) where {T,A1,A2,B1,B2,C1}
i = lazymul(first(a), first(b), first(c))
p, j = tdot(p, tail(a), tail(b))
add_indices(p, i, j)
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 4373 | struct OffsetPrecalc{
T,
N,
C,
B,
R,
X,
M,
P<:AbstractStridedPointer{T,N,C,B,R,X,M},
I
} <: AbstractStridedPointer{T,N,C,B,R,X,M}
ptr::P
precalc::I
end
@inline Base.pointer(ptr::OffsetPrecalc) = pointer(getfield(ptr, :ptr))
@inline Base.similar(ptr::OffsetPrecalc, p::Ptr) =
OffsetPrecalc(similar(getfield(ptr, :ptr), p), getfield(ptr, :precalc))
# @inline pointerforcomparison(p::OffsetPrecalc) = pointerforcomparison(getfield(p, :ptr))
# @inline pointerforcomparison(p::OffsetPrecalc, i::Tuple) = pointerforcomparison(p.ptr, i)
@inline offsetprecalc(x, ::Any) = x
@inline offsetprecalc(x::OffsetPrecalc, ::Val) = x
@inline offsetprecalc(x::StridedBitPointer, ::Val) = x
# @inline pointerforcomparison(p::AbstractStridedPointer) = pointer(p)
# @inline pointerforcomparison(p::AbstractStridedPointer, i) = gep(p, i)
@inline ArrayInterface.offsets(p::OffsetPrecalc) = offsets(getfield(p, :ptr))
@inline Base.strides(p::OffsetPrecalc) = static_strides(getfield(p, :ptr))
@inline ArrayInterface.static_strides(p::OffsetPrecalc) =
static_strides(getfield(p, :ptr))
@inline function LayoutPointers.similar_no_offset(sptr::OffsetPrecalc, ptr::Ptr)
OffsetPrecalc(
similar_no_offset(getfield(sptr, :ptr), ptr),
getfield(sptr, :precalc)
)
end
@inline function LayoutPointers.similar_with_offset(
sptr::OffsetPrecalc,
ptr::Ptr,
off::Tuple
)
OffsetPrecalc(
similar_with_offset(getfield(sptr, :ptr), ptr, off),
getfield(sptr, :precalc)
)
end
@inline LayoutPointers.bytestrides(p::OffsetPrecalc) =
bytestrides(getfield(p, :ptr))
@inline LayoutPointers.bytestrideindex(p::OffsetPrecalc) =
LayoutPointers.bytestrideindex(getfield(p, :ptr))
"""
Basically:
if I ∈ [3,5,7,9]
c[(I - 1) >> 1]
else
b * I
end
because
c = b .* [3, 5, 7, 9]
"""
@generated function lazymul(
::StaticInt{I},
b,
c::Tuple{Vararg{Any,N}}
) where {I,N}
Is = (I - 1) >> 1
ex = if (isodd(I) && 1 ≤ Is ≤ N) && (c.parameters[Is] !== nothing)
Expr(:call, GlobalRef(Core, :getfield), :c, Is, false)
elseif ((I ∈ (6, 10)) && ((I >> 2) ≤ N)) && (c.parameters[I>>2] !== nothing)
Expr(
:call,
:lazymul,
Expr(:call, Expr(:curly, :StaticInt, 2)),
Expr(:call, GlobalRef(Core, :getfield), :c, I >> 2, false)
)
else
Expr(:call, :lazymul, Expr(:call, Expr(:curly, :StaticInt, I)), :b)
end
Expr(:block, Expr(:meta, :inline), ex)
end
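# Sketch of the dispatch above (assuming `c = b .* (3, 5, 7, 9)` was
# precalculated): `lazymul(StaticInt(5), b, c)` becomes the lookup
# `getfield(c, 2)` (== 5b), while `lazymul(StaticInt(4), b, c)` falls back
# to `lazymul(StaticInt(4), b)`.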
@inline lazymul(a, b, c) = lazymul(a, b)
@inline lazymul(a::StaticInt, b, ::Nothing) = lazymul(a, b)
_unwrap(@nospecialize(_::Type{StaticInt{N}})) where {N} = N
_unwrap(@nospecialize(_)) = nothing
# descript is a tuple of (unrollfactor) for each ind; if it shouldn't preallocate, unrollfactor may be set to 1
function precalc_quote_from_descript(
@nospecialize(descript),
contig::Int,
@nospecialize(X)
)
precalc = Expr(:tuple)
anyprecalcs = anydynamicprecals = false
pstrideextracts = Expr(:block)
for (i, uf) ∈ enumerate(descript)
if i > length(X)
break
elseif i == contig || uf < 3
push!(precalc.args, nothing)
else
t = Expr(:tuple)
Xᵢ = X[i]
anyprecalcs = true
if Xᵢ === nothing
anydynamicprecals = true
pstride_i = Symbol(:pstride_, i)
push!(
pstrideextracts.args,
Expr(
:(=),
pstride_i,
Expr(:call, GlobalRef(Core, :getfield), :pstride, i, false)
)
)
for u = 3:2:uf
push!(t.args, Expr(:call, :vmul_nw, u, pstride_i))
end
else
for u = 3:2:uf
push!(t.args, static(u * Xᵢ))
end
end
push!(precalc.args, t)
end
end
q = Expr(:block, Expr(:meta, :inline))
if anydynamicprecals
push!(q.args, :(pstride = static_strides(p)))
push!(q.args, pstrideextracts)
end
if anyprecalcs
push!(q.args, Expr(:call, :OffsetPrecalc, :p, precalc))
else
push!(q.args, :p)
end
q
end
@generated function offsetprecalc(
p::AbstractStridedPointer{T,N,C,B,R,X,O},
::Val{descript}
) where {T,N,C,B,R,X,O,descript}
x = known(X)
any(isnothing, x) || return Expr(:block, Expr(:meta, :inline), :p)
precalc_quote_from_descript(descript, C, x)
end
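# Sketch: `offsetprecalc(p, Val((1, 9)))` on a matrix pointer with runtime
# strides skips the contiguous axis and precomputes
# `(3, 5, 7, 9) .* static_strides(p)[2]`, so later
# `lazymul(::StaticInt, stride, precalc)` calls resolve the odd multipliers
# 3–9 by tuple lookup rather than multiplication. (Statically-strided
# pointers are returned unchanged, since their products fold at compile time.)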
@inline tdot(ptr::OffsetPrecalc{T}, a, b) where {T} =
tdot(pointer(ptr), a, b, getfield(ptr, :precalc))
@inline tdot(ptr::OffsetPrecalc, ::Tuple{}, ::Tuple{}) = (pointer(ptr), Zero())
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 16573 |
@inline vstore!(ptr::AbstractStridedPointer{T}, v::Number) where {T<:Number} =
__vstore!(
pointer(ptr),
convert(T, v),
False(),
False(),
False(),
register_size()
)
using LayoutPointers: nopromote_axis_indicator
@inline _vload(
ptr::AbstractStridedPointer{T,0},
i::Tuple{},
::A,
::StaticInt{RS}
) where {T,A<:StaticBool,RS} = __vload(pointer(ptr), A(), StaticInt{RS}())
@inline gep(ptr::AbstractStridedPointer{T,0}, i::Tuple{}) where {T} =
pointer(ptr)
# terminating
@inline _offset_index(i::Tuple{}, offset::Tuple{}) = ()
@inline _offset_index(
i::Tuple{I1},
offset::Tuple{I2,I3,Vararg}
) where {I1,I2,I3} = (vsub_nsw(only(i), first(offset)),)
@inline _offset_index(
i::Tuple{I1,I2,Vararg},
offset::Tuple{I3}
) where {I1,I2,I3} = (vsub_nsw(first(i), first(offset)),)
@inline _offset_index(i::Tuple{I1}, offset::Tuple{I2}) where {I1,I2} =
(vsub_nsw(only(i), only(offset)),)
# iterating
@inline _offset_index(
i::Tuple{I1,I2,Vararg},
offset::Tuple{I3,I4,Vararg}
) where {I1,I2,I3,I4} = (
vsub_nsw(first(i), first(offset)),
_offset_index(Base.tail(i), Base.tail(offset))...
)
@inline offset_index(ptr, i) = _offset_index(i, offsets(ptr))
@inline linear_index(ptr, i) =
tdot(ptr, offset_index(ptr, i), static_strides(ptr))
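# i.e. (a sketch): for indices `(i1, i2)` with offsets `(o1, o2)` and strides
# `(s1, s2)`, `linear_index` yields `(pointer(ptr), (i1-o1)*s1 + (i2-o2)*s2)`,
# with the arithmetic performed lazily through `tdot`/`lazymul`.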
# Fast compile path?
@inline function _vload(
ptr::AbstractStridedPointer,
i::Tuple,
::A,
::StaticInt{RS}
) where {A<:StaticBool,RS}
p, li = linear_index(ptr, i)
__vload(p, li, A(), StaticInt{RS}())
end
@inline function _vload(
ptr::AbstractStridedPointer,
i::Tuple,
m::Union{AbstractMask,Bool},
::A,
::StaticInt{RS}
) where {A<:StaticBool,RS}
p, li = linear_index(ptr, i)
__vload(p, li, m, A(), StaticInt{RS}())
end
@inline function _vload(
ptr::AbstractStridedPointer{T},
i::Tuple{I},
::A,
::StaticInt{RS}
) where {T,I,A<:StaticBool,RS}
p, li = tdot(ptr, i, static_strides(ptr))
__vload(p, li, A(), StaticInt{RS}())
end
@inline function _vload(
ptr::AbstractStridedPointer{T},
i::Tuple{I},
m::Union{AbstractMask,Bool},
::A,
::StaticInt{RS}
) where {T,I,A<:StaticBool,RS}
p, li = tdot(ptr, i, static_strides(ptr))
__vload(p, li, m, A(), StaticInt{RS}())
end
# Ambiguity: 1-dimensional + 1-dim index -> Cartesian (offset) indexing
@inline function _vload(
ptr::AbstractStridedPointer{T,1},
i::Tuple{I},
::A,
::StaticInt{RS}
) where {T,I,A<:StaticBool,RS}
p, li = linear_index(ptr, i)
__vload(p, li, A(), StaticInt{RS}())
end
@inline function _vload(
ptr::AbstractStridedPointer{T,1},
i::Tuple{I},
m::Union{AbstractMask,Bool},
::A,
::StaticInt{RS}
) where {T,I,A<:StaticBool,RS}
p, li = linear_index(ptr, i)
__vload(p, li, m, A(), StaticInt{RS}())
end
@generated function _vload(
ptr::AbstractStridedPointer{T,1},
i::Tuple{I},
m::VecUnroll{Nm1},
::A,
::StaticInt{RS}
) where {T,Nm1,I<:VecUnroll{Nm1},A<:StaticBool,RS}
t = Expr(:tuple)
for n = 1:Nm1+1
push!(
t.args,
:(_vload(
ptr,
(getfield(ii, $n),),
getfield(mm, $n),
$(A()),
$(StaticInt{RS}())
))
)
end
Expr(
:block,
Expr(:meta, :inline),
:(ii = getfield(getfield(i, 1), 1)),
:(mm = getfield(m, 1)),
Expr(:call, VecUnroll, t)
)
end
# align, noalias, nontemporal
@inline function _vstore!(
ptr::AbstractStridedPointer,
v,
i::Tuple,
::A,
::S,
::NT,
::StaticInt{RS}
) where {A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
p, li = linear_index(ptr, i)
__vstore!(p, v, li, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
ptr::AbstractStridedPointer,
v,
i::Tuple,
m::Union{AbstractMask,Bool},
::A,
::S,
::NT,
::StaticInt{RS}
) where {A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
p, li = linear_index(ptr, i)
__vstore!(p, v, li, m, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
ptr::AbstractStridedPointer{T},
v,
i::Tuple{I},
::A,
::S,
::NT,
::StaticInt{RS}
) where {T,I,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
p, li = tdot(ptr, i, static_strides(ptr))
__vstore!(p, v, li, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
ptr::AbstractStridedPointer{T},
v,
i::Tuple{I},
m::Union{AbstractMask,Bool},
::A,
::S,
::NT,
::StaticInt{RS}
) where {T,I,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
p, li = tdot(ptr, i, static_strides(ptr))
__vstore!(p, v, li, m, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
ptr::AbstractStridedPointer{T,1},
v,
i::Tuple{I},
::A,
::S,
::NT,
::StaticInt{RS}
) where {T,I,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
p, li = linear_index(ptr, i)
__vstore!(p, v, li, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
ptr::AbstractStridedPointer{T,1},
v,
i::Tuple{I},
m::Union{AbstractMask,Bool},
::A,
::S,
::NT,
::StaticInt{RS}
) where {T,I,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
p, li = linear_index(ptr, i)
__vstore!(p, v, li, m, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
f::F,
ptr::AbstractStridedPointer,
v,
i::Tuple,
::A,
::S,
::NT,
::StaticInt{RS}
) where {F,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
p, li = linear_index(ptr, i)
__vstore!(f, p, v, li, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
f::F,
ptr::AbstractStridedPointer,
v,
i::Tuple,
m::Union{AbstractMask,Bool},
::A,
::S,
::NT,
::StaticInt{RS}
) where {F,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
p, li = linear_index(ptr, i)
__vstore!(f, p, v, li, m, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
f::F,
ptr::AbstractStridedPointer{T},
v,
i::Tuple{I},
::A,
::S,
::NT,
::StaticInt{RS}
) where {F,T,I,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
p, li = tdot(ptr, i, static_strides(ptr))
__vstore!(f, p, v, li, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
f::F,
ptr::AbstractStridedPointer{T},
v,
i::Tuple{I},
m::Union{AbstractMask,Bool},
::A,
::S,
::NT,
::StaticInt{RS}
) where {F,T,I,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
p, li = tdot(ptr, i, static_strides(ptr))
__vstore!(f, p, v, li, m, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
f::F,
ptr::AbstractStridedPointer{T,1},
v,
i::Tuple{I},
::A,
::S,
::NT,
::StaticInt{RS}
) where {F,T,I,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
p, li = linear_index(ptr, i)
__vstore!(f, p, v, li, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
f::F,
ptr::AbstractStridedPointer{T,1},
v,
i::Tuple{I},
m::Union{AbstractMask,Bool},
::A,
::S,
::NT,
::StaticInt{RS}
) where {F,T,I,A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS}
p, li = linear_index(ptr, i)
__vstore!(f, p, v, li, m, A(), S(), NT(), StaticInt{RS}())
end
@inline function gep(
ptr::AbstractStridedPointer{T,N,C,B,R,X,NTuple{N,StaticInt{0}}},
i::Tuple{Vararg{Any,N}}
) where {T,N,C,B,R,X}
p, li = tdot(ptr, i, static_strides(ptr))
gep(p, li)
end
@inline function gep(
ptr::AbstractStridedPointer{T,N,C,B,R,X,O},
i::Tuple
) where {T,N,C,B,R,X,O}
p, li = linear_index(ptr, i)
gep(p, li)
end
@inline function gep(ptr::AbstractStridedPointer{T}, i::Tuple{I}) where {T,I}
p, li = tdot(ptr, i, static_strides(ptr))
gep(p, li)
end
@inline function gep(
ptr::AbstractStridedPointer{T,1,C,B,R,X,O},
i::Tuple{I}
) where {T,I,C,B,R,X,O}
p, li = linear_index(ptr, i)
gep(p, li)
end
@inline function gep(
ptr::AbstractStridedPointer{T,1,C,B,R,X,Tuple{StaticInt{0}}},
i::Tuple{I}
) where {T,I,C,B,R,X}
p, li = tdot(ptr, i, static_strides(ptr))
gep(p, li)
end
# There is probably a smarter way to do indexing adjustment here.
# The reasoning for the current approach of geping for Zero() on extracted inds
# and for offsets otherwise is best demonstrated with this motivational example:
#
# A = OffsetArray(rand(10,11), 6:15, 5:15);
# for i in 6:15
#   s += A[i,i]
# end
# first access is at zero-based index
# (first(6:15) - ArrayInterface.offsets(A)[1]) * ArrayInterface.static_strides(A)[1] + (first(6:15) - ArrayInterface.offsets(A)[2]) * ArrayInterface.static_strides(A)[2]
# equal to
# (6 - 6)*1 + (6 - 5)*10 = 10
# i.e., the 1-based index 11.
# So now we want to adjust the offsets and pointer's value
# so that indexing it with a single `i` (after summing `static_strides`) is correct.
# Moving to 0-based indexing is easiest for combining static_strides. So we gep by 0 on these inds.
# E.g., gep(stridedpointer(A), (0,0))
# ptr += (0 - 6)*1 + (0 - 5)*10 = -56
# new stride = 1 + 10 = 11
# new_offset = 0
# now if we access the 6th element
# (6 - new_offset) * new_stride
# ptr += (6 - 0) * 11 = 66
# cumulative:
# ptr = pointer(A) + 66 - 56 = pointer(A) + 10
# so initial load is of pointer(A) + 10 -> the 11th element w/ 1-based indexing
function double_index_quote(C, B, R::NTuple{N,Int}, I1::Int, I2::Int) where {N}
# place into position of second arg
J1 = I1 + 1
J2 = I2 + 1
@assert (J1 != B) & (J2 != B)
Cnew = ((C == J1) | (C == J2)) ? -1 : (C - (J1 < C))
strd = Expr(:tuple)
offs = Expr(:tuple)
inds = Expr(:tuple)
Rtup = Expr(:tuple)
si = Expr(:curly, GlobalRef(ArrayInterface, :StrideIndex), N - 1, Rtup, Cnew)
for n = 1:N
if n == J1
push!(inds.args, :(Zero()))
elseif n == J2
arg1 = Expr(:call, getfield, :strd, J1, false)
arg2 = Expr(:call, getfield, :strd, J2, false)
push!(strd.args, Expr(:call, :+, arg1, arg2))
push!(offs.args, :(Zero()))
push!(inds.args, :(Zero()))
push!(Rtup.args, max(R[J1], R[J2]))
else
push!(strd.args, Expr(:call, getfield, :strd, n, false))
push!(offs.args, Expr(:call, getfield, :offs, n, false))
push!(inds.args, Expr(:call, getfield, :offs, n, false))
push!(Rtup.args, R[n])
end
end
gepedptr = Expr(:call, :gep, :ptr, inds)
newptr = Expr(
:call,
:stridedpointer,
gepedptr,
Expr(:call, si, strd, offs),
:(StaticInt{$B}())
)
Expr(
:block,
Expr(:meta, :inline),
:(strd = static_strides(ptr)),
:(offs = offsets(ptr)),
newptr
)
end
@generated function double_index(
ptr::AbstractStridedPointer{T,N,C,B,R},
::Val{I1},
::Val{I2}
) where {T,N,C,B,R,I1,I2}
double_index_quote(C, B, R, I1, I2)
end
using LayoutPointers: FastRange
# `FastRange{<:Union{Integer,StaticInt}}` can ignore the offset
@inline vload(
r::FastRange{T,Zero},
i::Tuple{I}
) where {T<:Union{Integer,StaticInt},I} =
convert(T, getfield(r, :o)) + convert(T, getfield(r, :s)) * first(i)
@inline function vload(r::FastRange{T}, i::Tuple{I}) where {T<:FloatingTypes,I}
convert(T, getfield(r, :f)) +
convert(T, getfield(r, :s)) * (only(i) + convert(T, getfield(r, :o)))
end
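# Sketch: for an integer `FastRange` the offset term is applied directly, so
# `vload(r, (i,))` is `o + s*i`; the floating-point variant instead computes
# `f + s*(i + o)`, keeping the offset in the index domain.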
@inline function gesp(
r::FastRange{T,Zero},
i::Tuple{I}
) where {I,T<:Union{Integer,StaticInt}}
s = getfield(r, :s)
FastRange{T}(Zero(), s, only(i) * s + getfield(r, :o))
end
@inline function gesp(r::FastRange{T}, i::Tuple{I}) where {I,T<:FloatingTypes}
FastRange{T}(getfield(r, :f), getfield(r, :s), only(i) + getfield(r, :o))
end
@inline gesp(
r::FastRange{T,Zero},
i::Tuple{NullStep}
) where {T<:Union{Integer,StaticInt}} = r
@inline gesp(r::FastRange{T}, i::Tuple{NullStep}) where {T<:FloatingTypes} = r
@inline increment_ptr(
r::FastRange{T,Zero},
i::Tuple{I}
) where {I,T<:Union{Integer,StaticInt}} =
only(i) * getfield(r, :s) + getfield(r, :o)
@inline increment_ptr(
r::FastRange{T},
i::Tuple{I}
) where {I,T<:Union{Integer,StaticInt}} = only(i) + getfield(r, :o)
@inline increment_ptr(r::FastRange) = getfield(r, :o)
@inline increment_ptr(::FastRange{T}, o, i::Tuple{I}) where {I,T} =
vadd_nsw(only(i), o)
@inline increment_ptr(r::FastRange{T,Zero}, o, i::Tuple{I}) where {I,T} =
vadd_nsw(vmul_nsw(only(i), getfield(r, :s)), o)
@inline reconstruct_ptr(r::FastRange{T}, o) where {T} =
FastRange{T}(getfield(r, :f), getfield(r, :s), o)
@inline vload(r::FastRange, i, m::AbstractMask) =
(v = vload(r, i); ifelse(m, v, zero(v)))
@inline vload(r::FastRange, i, m::Bool) =
(v = vload(r, i); ifelse(m, v, zero(v)))
@inline _vload(r::FastRange, i, _, __) = vload(r, i)
@inline _vload(r::FastRange, i, m::AbstractMask, __, ___) = vload(r, i, m)
@inline _vload(
r::FastRange,
i,
m::VecUnroll{<:Any,<:Any,<:Union{Bool,Bit}},
__,
___
) = vload(r, i, m)
function _vload_fastrange_unroll(
AU::Int,
F::Int,
N::Int,
AV::Int,
W::Int,
M::UInt,
X::Int,
mask::Bool,
vecunrollmask::Bool
)
t = Expr(:tuple)
inds = unrolled_indicies(1, AU, F, N, AV, W, X)
q = quote
$(Expr(:meta, :inline))
gptr = gesp(r, data(u))
end
vecunrollmask && push!(q.args, :(masktup = data(vm)))
gf = GlobalRef(Core, :getfield)
for n = 1:N
l = Expr(:call, :vload, :gptr, inds[n])
if vecunrollmask
push!(l.args, :($gf(masktup, $n, false)))
elseif mask & (M % Bool)
push!(l.args, :m)
end
M >>= 1
push!(t.args, l)
end
push!(q.args, :(VecUnroll($t)))
q
end
"""
For structs wrapping arrays, using `GC.@preserve` can trigger heap allocations.
`preserve_buffer` attempts to extract the heap-allocated part. Isolating it by itself
will often allow the heap allocations to be elided. For example:
```julia
julia> using StaticArrays, BenchmarkTools
julia> # Needed until a release is made featuring https://github.com/JuliaArrays/StaticArrays.jl/commit/a0179213b741c0feebd2fc6a1101a7358a90caed
Base.elsize(::Type{<:MArray{S,T}}) where {S,T} = sizeof(T)
julia> @noinline foo(A) = unsafe_load(A, 1)
foo (generic function with 1 method)
julia> function alloc_test_1()
A = view(MMatrix{8,8,Float64}(undef), 2:5, 3:7)
A[begin] = 4
GC.@preserve A foo(pointer(A))
end
alloc_test_1 (generic function with 1 method)
julia> function alloc_test_2()
A = view(MMatrix{8,8,Float64}(undef), 2:5, 3:7)
A[begin] = 4
pb = parent(A) # or `LoopVectorization.preserve_buffer(A)`; `preserve_buffer(::SubArray)` calls `parent`
GC.@preserve pb foo(pointer(A))
end
alloc_test_2 (generic function with 1 method)
julia> @benchmark alloc_test_1()
BenchmarkTools.Trial:
memory estimate: 544 bytes
allocs estimate: 1
--------------
minimum time: 17.227 ns (0.00% GC)
median time: 21.352 ns (0.00% GC)
mean time: 26.151 ns (13.33% GC)
maximum time: 571.130 ns (78.53% GC)
--------------
samples: 10000
evals/sample: 998
julia> @benchmark alloc_test_2()
BenchmarkTools.Trial:
memory estimate: 0 bytes
allocs estimate: 0
--------------
minimum time: 3.275 ns (0.00% GC)
median time: 3.493 ns (0.00% GC)
mean time: 3.491 ns (0.00% GC)
maximum time: 4.998 ns (0.00% GC)
--------------
samples: 10000
evals/sample: 1000
```
"""
@inline preserve_buffer(A::AbstractArray) = A
@inline preserve_buffer(
A::Union{
LinearAlgebra.Transpose,
LinearAlgebra.Adjoint,
Base.ReinterpretArray,
Base.ReshapedArray,
PermutedDimsArray,
SubArray
}
) = preserve_buffer(parent(A))
@inline preserve_buffer(x) = x
function llvmptr_comp_quote(cmp, Tsym)
pt = Expr(:curly, GlobalRef(Core, :LLVMPtr), Tsym, 0)
instrs = "%cmpi1 = icmp $cmp i8* %0, %1\n%cmpi8 = zext i1 %cmpi1 to i8\nret i8 %cmpi8"
Expr(
:block,
Expr(:meta, :inline),
:($(Base.llvmcall)($instrs, Bool, Tuple{$pt,$pt}, p1, p2))
)
end
@inline llvmptrd(p::Ptr) = reinterpret(Core.LLVMPtr{Float64,0}, p)
@inline llvmptrd(p::AbstractStridedPointer) = llvmptrd(pointer(p))
for (op, f, cmp) ∈ [
(:(<), :vlt, "ult"),
(:(>), :vgt, "ugt"),
(:(≤), :vle, "ule"),
(:(≥), :vge, "uge"),
(:(==), :veq, "eq"),
(:(≠), :vne, "ne")
]
@eval begin
@generated function $f(
p1::Core.LLVMPtr{T,0},
p2::Core.LLVMPtr{T,0}
) where {T}
llvmptr_comp_quote($cmp, JULIA_TYPES[T])
end
@inline Base.$op(p1::P, p2::P) where {P<:AbstractStridedPointer} =
$f(llvmptrd(p1), llvmptrd(p2))
@inline Base.$op(p1::P, p2::P) where {P<:StridedBitPointer} =
$op(linearize(p1), linearize(p2))
@inline Base.$op(p1::P, p2::P) where {P<:FastRange} =
$op(getfield(p1, :o), getfield(p2, :o))
@inline $f(p1::Ptr, p2::Ptr, sp::AbstractStridedPointer) =
$f(llvmptrd(p1), llvmptrd(p2))
@inline $f(p1::NTuple{N,Int}, p2::NTuple{N,Int}, sp) where {N} =
$op(reconstruct_ptr(sp, p1), reconstruct_ptr(sp, p2))
@inline $f(a, b, c) = $f(a, b)
end
end
@inline linearize(p::StridedBitPointer) =
-sum(map(*, static_strides(p), offsets(p)))
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 12997 | # unary # 2^2 - 2 = 2 definitions
@inline fmap(f::F, x::Tuple{X}) where {F,X} = (f(first(x)),)
@inline fmap(f::F, x::NTuple) where {F} =
(f(first(x)), fmap(f, Base.tail(x))...)
# binary # 2^3 - 2 = 6 definitions
@inline fmap(f::F, x::Tuple{X}, y::Tuple{Y}) where {F,X,Y} =
(f(first(x), first(y)),)
@inline fmap(f::F, x::Tuple{X}, y) where {F,X} = (f(first(x), y),)
@inline fmap(f::F, x, y::Tuple{Y}) where {F,Y} = (f(x, first(y)),)
@inline fmap(
f::F,
x::Tuple{Vararg{Any,N}},
y::Tuple{Vararg{Any,N}}
) where {F,N} = (f(first(x), first(y)), fmap(f, Base.tail(x), Base.tail(y))...)
@inline fmap(f::F, x::Tuple, y) where {F} =
(f(first(x), y), fmap(f, Base.tail(x), y)...)
@inline fmap(f::F, x, y::Tuple) where {F} =
(f(x, first(y)), fmap(f, x, Base.tail(y))...)
fmap(f::F, x::Tuple{X}, y::Tuple) where {F,X} = throw("Dimension mismatch.")
fmap(f::F, x::Tuple, y::Tuple{Y}) where {F,Y} = throw("Dimension mismatch.")
fmap(f::F, x::Tuple, y::Tuple) where {F} = throw("Dimension mismatch.")
@generated function fmap(f::F, x::Vararg{Any,N}) where {F,N}
q = Expr(:block, Expr(:meta, :inline))
t = Expr(:tuple)
U = 1
call = Expr(:call, :f)
syms = Vector{Symbol}(undef, N)
istup = Vector{Bool}(undef, N)
gf = GlobalRef(Core, :getfield)
for n ∈ 1:N
syms[n] = xₙ = Symbol(:x_, n)
push!(q.args, Expr(:(=), xₙ, Expr(:call, gf, :x, n, false)))
istup[n] = ist = (x[n] <: Tuple)
if ist
U = length(x[n].parameters)
push!(call.args, Expr(:call, gf, xₙ, 1, false))
else
push!(call.args, xₙ)
end
end
push!(t.args, call)
for u ∈ 2:U
call = Expr(:call, :f)
for n ∈ 1:N
xₙ = syms[n]
if istup[n]
push!(call.args, Expr(:call, gf, xₙ, u, false))
else
push!(call.args, xₙ)
end
end
push!(t.args, call)
end
push!(q.args, t)
q
end
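# Usage sketch (not part of the original source):
#   fmap(+, (1, 2), (10, 20)) == (11, 22)  # zipped tuples
#   fmap(+, (1, 2), 10)       == (11, 12)  # scalar broadcast across the tuple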
for op ∈ [
:vsub,
:vabs,
:vfloor,
:vceil,
:vtrunc,
:vround,
:vsqrt,
:vnot,
:vleading_zeros,
:vtrailing_zeros,
:vsub_fast,
:vcount_ones
]
@eval @inline $op(v1::VecUnroll{N,W,T}) where {N,W,T} =
VecUnroll(fmap($op, getfield(v1, :data)))
end
# only for `Float32` and `Float64`
@inline inv_approx(v::VecUnroll{N,W,T}) where {N,W,T<:Union{Float32,Float64}} =
VecUnroll(fmap(inv_approx, getfield(v, :data)))
@inline vreinterpret(::Type{T}, v::VecUnroll) where {T<:Number} =
VecUnroll(fmap(vreinterpret, T, getfield(v, :data)))
for op ∈ [
:vadd,
:vsub,
:vmul,
:vand,
:vor,
:vxor,
:vlt,
:vle,
:vgt,
:vge,
:veq,
:vne,
:vadd_fast,
:vsub_fast,
:vmul_fast,
:vsub_nsw,
:vadd_nsw,
:vmul_nsw,
:vsub_nw,
:vadd_nw,
:vmul_nw,
:vsub_nuw,
:vadd_nuw,
:vmul_nuw
]
@eval begin
@inline $op(v1::VecUnroll, v2::VecUnroll) =
VecUnroll(fmap($op, getfield(v1, :data), getfield(v2, :data)))
@inline $op(v1::VecUnroll, v2::VecOrScalar) =
VecUnroll(fmap($op, getfield(v1, :data), v2))
@inline $op(v1::VecOrScalar, v2::VecUnroll) =
VecUnroll(fmap($op, v1, getfield(v2, :data)))
@inline $op(v1::VecUnroll{N,W,T,V}, ::StaticInt{M}) where {N,W,T,V,M} =
VecUnroll(fmap($op, getfield(v1, :data), vbroadcast(Val{W}(), T(M))))
@inline $op(::StaticInt{M}, v1::VecUnroll{N,W,T,V}) where {N,W,T,V,M} =
VecUnroll(fmap($op, vbroadcast(Val{W}(), T(M)), getfield(v1, :data)))
@inline $op(v1::VecUnroll{N,W,T,V}, ::StaticInt{1}) where {N,W,T,V} =
VecUnroll(fmap($op, getfield(v1, :data), One()))
@inline $op(::StaticInt{1}, v1::VecUnroll{N,W,T,V}) where {N,W,T,V} =
VecUnroll(fmap($op, One(), getfield(v1, :data)))
@inline $op(v1::VecUnroll{N,W,T,V}, ::StaticInt{0}) where {N,W,T,V} =
VecUnroll(fmap($op, getfield(v1, :data), Zero()))
@inline $op(::StaticInt{0}, v1::VecUnroll{N,W,T,V}) where {N,W,T,V} =
VecUnroll(fmap($op, Zero(), getfield(v1, :data)))
end
end
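# Sketch of the semantics above: binary ops on `VecUnroll` apply `fmap`
# pairwise across the unrolled vectors; mixing with a scalar or single `Vec`
# broadcasts it, so e.g. `vadd(vu, 2.0)` adds 2.0 to every lane of every
# constituent vector. `StaticInt{M}` arguments are materialized as
# `vbroadcast`s, with 0 and 1 kept as `Zero()`/`One()`.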
for op ∈ [:vmax, :vmax_fast, :vmin, :vmin_fast, :vcopysign]
@eval begin
@inline $op(v1::VecUnroll, v2::VecUnroll) =
VecUnroll(fmap($op, getfield(v1, :data), getfield(v2, :data)))
end
end
for op ∈
[:vgt, :vge, :vlt, :vle, :veq, :vne, :vmax, :vmax_fast, :vmin, :vmin_fast]
@eval begin
@inline function $op(
v::VecUnroll{N,W,T,V},
s::Union{NativeTypes,AbstractSIMDVector}
) where {N,W,T,V}
VecUnroll(fmap($op, getfield(v, :data), vbroadcast(Val{W}(), s)))
end
@inline function $op(
s::Union{NativeTypes,AbstractSIMDVector},
v::VecUnroll{N,W,T,V}
) where {N,W,T,V}
VecUnroll(fmap($op, vbroadcast(Val{W}(), s), getfield(v, :data)))
end
end
end
for op ∈ [:vrem, :vshl, :vashr, :vlshr, :vdiv, :vfdiv, :vrem_fast, :vfdiv_fast]
@eval begin
@inline $op(v1::VecUnroll, v2::VecUnroll) =
VecUnroll(fmap($op, getfield(v1, :data), getfield(v2, :data)))
end
end
for op ∈ [
:vrem,
:vand,
:vor,
:vxor,
:vshl,
:vashr,
:vlshr,
:vlt,
:vle,
:vgt,
:vge,
:veq,
:vne
]
@eval begin
@inline $op(vu::VecUnroll, i::MM) = $op(vu, Vec(i))
@inline $op(i::MM, vu::VecUnroll) = $op(Vec(i), vu)
end
end
for op ∈ [:vshl, :vashr, :vlshr]
@eval begin
@inline $op(m::AbstractMask, vu::VecUnroll) = $op(Vec(m), vu)
@inline $op(v1::AbstractSIMDVector, v2::VecUnroll) =
VecUnroll(fmap($op, v1, getfield(v2, :data)))
@inline $op(v1::VecUnroll, v2::AbstractSIMDVector) =
VecUnroll(fmap($op, getfield(v1, :data), v2))
end
end
for op ∈ [:rotate_left, :rotate_right, :funnel_shift_left, :funnel_shift_right]
@eval begin
@inline $op(v1::VecUnroll{N,W,T,V}, v2::R) where {N,W,T,V,R<:IntegerTypes} =
VecUnroll(fmap($op, getfield(v1, :data), promote_type(V, R)(v2)))
@inline $op(v1::R, v2::VecUnroll{N,W,T,V}) where {N,W,T,V,R<:IntegerTypes} =
VecUnroll(fmap($op, promote_type(V, R)(v1), getfield(v2, :data)))
@inline $op(v1::VecUnroll, v2::VecUnroll) =
VecUnroll(fmap($op, getfield(v1, :data), getfield(v2, :data)))
end
end
for op ∈ [
:vfma,
:vmuladd,
:vfma_fast,
:vmuladd_fast,
:vfnmadd,
:vfmsub,
:vfnmsub,
:vfnmadd_fast,
:vfmsub_fast,
:vfnmsub_fast,
:vfmadd231,
:vfnmadd231,
:vfmsub231,
:vfnmsub231,
:ifmahi,
:ifmalo
]
@eval begin
# @generated function $op(v1::VecUnroll{N,W,T1,V1}, v2::VecUnroll{N,W,T2,V2}, v3::VecUnroll{N,W,T3,V3}) where {N,W,T1,T2,T3}
# if T1 <: NativeTypes
# VecUnroll(fmap($op, getfield(v1, :data), getfield(v2, :data), getfield(v3, :data)))
# Expr(:block, Expr(:meta,:inline), ex)
# end
@inline function $op(
v1::VecUnroll{N,W,<:NativeTypesExceptBit},
v2::VecUnroll{N,W,<:NativeTypesExceptBit},
v3::VecUnroll{N,W}
) where {N,W}
a, b, c = promote(v1, v2, v3)
VecUnroll(
fmap($op, getfield(a, :data), getfield(b, :data), getfield(c, :data))
)
end
end
end
@inline function vifelse(
v1::VecUnroll{N,W,<:Boolean},
v2::T,
v3::T
) where {N,W,T<:NativeTypes}
VecUnroll(fmap(vifelse, getfield(v1, :data), Vec{W,T}(v2), Vec{W,T}(v3)))
end
@inline function vifelse(
v1::VecUnroll{N,W,<:Boolean},
v2::T,
v3::T
) where {N,W,T<:Real}
VecUnroll(fmap(vifelse, getfield(v1, :data), v2, v3))
end
@inline function vifelse(
v1::Vec{W,Bool},
v2::VecUnroll{N,W,T},
v3::Union{NativeTypes,AbstractSIMDVector,StaticInt}
) where {N,W,T}
VecUnroll(fmap(vifelse, Vec{W,T}(v1), getfield(v2, :data), Vec{W,T}(v3)))
end
@inline function vifelse(
v1::Vec{W,Bool},
v2::Union{NativeTypes,AbstractSIMDVector,StaticInt},
v3::VecUnroll{N,W,T}
) where {N,W,T}
VecUnroll(fmap(vifelse, Vec{W,T}(v1), Vec{W,T}(v2), getfield(v3, :data)))
end
@inline function vifelse(
v1::VecUnroll{N,WB,<:Boolean},
v2::VecUnroll{N,W,T},
v3::Union{NativeTypes,AbstractSIMDVector,StaticInt}
) where {N,W,WB,T}
VecUnroll(
fmap(vifelse, getfield(v1, :data), getfield(v2, :data), Vec{W,T}(v3))
)
end
@inline function vifelse(
v1::VecUnroll{N,WB,<:Boolean},
v2::Union{NativeTypes,AbstractSIMDVector,StaticInt},
v3::VecUnroll{N,W,T}
) where {N,W,WB,T}
VecUnroll(
fmap(vifelse, getfield(v1, :data), Vec{W,T}(v2), getfield(v3, :data))
)
end
@inline function vifelse(
v1::Vec{W,Bool},
v2::VecUnroll{N,W,T},
v3::VecUnroll{N,W,T}
) where {N,W,T}
VecUnroll(
fmap(vifelse, Vec{W,T}(v1), getfield(v2, :data), getfield(v3, :data))
)
end
@inline function vifelse(
v1::VecUnroll{N,WB,<:Boolean},
v2::VecUnroll{N,W,T},
v3::VecUnroll{N,W,T}
) where {N,W,WB,T}
VecUnroll(
fmap(
vifelse,
getfield(v1, :data),
getfield(v2, :data),
getfield(v3, :data)
)
)
end
@inline function vifelse(
v1::VecUnroll{N,WB,<:Boolean},
v2::VecUnroll{N,W},
v3::VecUnroll{N,W}
) where {N,W,WB}
v4, v5 = promote(v2, v3)
VecUnroll(
fmap(
vifelse,
getfield(v1, :data),
getfield(v4, :data),
getfield(v5, :data)
)
)
end
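# Editor's note (illustrative sketch, assuming `Bit`-masks over 4-wide vectors):
# a `VecUnroll` of masks blends two `VecUnroll`s of data one constituent vector
# at a time, e.g.
#   m = VecUnroll((Mask{4}(0x3), Mask{4}(0xc)))
#   a = VecUnroll((Vec(1, 2, 3, 4), Vec(5, 6, 7, 8)))
#   b = VecUnroll((Vec(0, 0, 0, 0), Vec(0, 0, 0, 0)))
#   vifelse(m, a, b)  # lanes 1:2 from the first vector of `a`, lanes 3:4 from the second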
@inline veq(v::VecUnroll{N,W,T}, x::AbstractIrrational) where {N,W,T} =
v == vbroadcast(Val{W}(), T(x))
@inline veq(x::AbstractIrrational, v::VecUnroll{N,W,T}) where {N,W,T} =
vbroadcast(Val{W}(), T(x)) == v
@inline vunsafe_trunc(::Type{T}, v::VecUnroll) where {T<:Real} =
VecUnroll(fmap(vunsafe_trunc, T, getfield(v, :data)))
@inline vrem(v::VecUnroll, ::Type{T}) where {T<:Real} =
VecUnroll(fmap(vrem, getfield(v, :data), T))
@inline vrem(
v::VecUnroll{N,W1},
::Type{VecUnroll{N,W2,T,V}}
) where {N,W1,W2,T,V} = VecUnroll(fmap(vrem, getfield(v, :data), V))
@inline (::Type{VecUnroll{N,W,T,V}})(
vu::VecUnroll{N,W,T,V}
) where {N,W,T,V<:AbstractSIMDVector{W,T}} = vu
@inline function (::Type{VecUnroll{N,W,T,VT}})(
vu::VecUnroll{N,W,S,VS}
) where {N,W,T,VT<:AbstractSIMDVector{W,T},S,VS<:AbstractSIMDVector{W,S}}
VecUnroll(fmap(convert, Vec{W,T}, getfield(vu, :data)))
end
function collapse_expr(N, op, final)
N += 1
t = Expr(:tuple)
s = Vector{Symbol}(undef, N)
for n ∈ 1:N
s_n = s[n] = Symbol(:v_, n)
push!(t.args, s_n)
end
q = quote
$(Expr(:meta, :inline))
$t = data(vu)
end
_final = if final == 1
1
else
2final
end
while N > _final
for n ∈ 1:N>>>1
push!(q.args, Expr(:(=), s[n], Expr(:call, op, s[n], s[n+(N>>>1)])))
end
isodd(N) && push!(q.args, Expr(:(=), s[1], Expr(:call, op, s[1], s[N])))
N >>>= 1
end
if final != 1
for n ∈ final+1:N
push!(q.args, Expr(:(=), s[n-final], Expr(:call, op, s[n-final], s[n])))
end
t = Expr(:tuple)
for n ∈ 1:final
push!(t.args, s[n])
end
push!(q.args, :(VecUnroll($t)))
end
q
end
@generated collapse(f::F, vu::VecUnroll{N}) where {F,N} =
  collapse_expr(N, :f, 1)
@generated contract(f::F, vu::VecUnroll{N}, ::StaticInt{C}) where {F,N,C} =
collapse_expr(N, :f, C)
@generated collapse_add(vu::VecUnroll{N}) where {N} = collapse_expr(N, :vadd, 1)
@generated collapse_mul(vu::VecUnroll{N}) where {N} = collapse_expr(N, :vmul, 1)
@generated collapse_max(vu::VecUnroll{N}) where {N} = collapse_expr(N, :max, 1)
@generated collapse_min(vu::VecUnroll{N}) where {N} = collapse_expr(N, :min, 1)
@generated collapse_and(vu::VecUnroll{N}) where {N} = collapse_expr(N, :&, 1)
@generated collapse_or(vu::VecUnroll{N}) where {N} = collapse_expr(N, :|, 1)
@generated contract_add(vu::VecUnroll{N}, ::StaticInt{C}) where {N,C} =
collapse_expr(N, :vadd, C)
@generated contract_mul(vu::VecUnroll{N}, ::StaticInt{C}) where {N,C} =
collapse_expr(N, :vmul, C)
@generated contract_max(vu::VecUnroll{N}, ::StaticInt{C}) where {N,C} =
collapse_expr(N, :max, C)
@generated contract_min(vu::VecUnroll{N}, ::StaticInt{C}) where {N,C} =
collapse_expr(N, :min, C)
@generated contract_and(vu::VecUnroll{N}, ::StaticInt{C}) where {N,C} =
collapse_expr(N, :&, C)
@generated contract_or(vu::VecUnroll{N}, ::StaticInt{C}) where {N,C} =
collapse_expr(N, :|, C)
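# Worked example (editor's illustration): for `vu = VecUnroll((v1, v2, v3, v4))`
# (type parameter `N == 3`), the generated reductions form a pairwise tree:
#   collapse_add(vu)                # vadd(vadd(v1, v3), vadd(v2, v4))
#   contract_add(vu, StaticInt(2))  # VecUnroll((vadd(v1, v3), vadd(v2, v4)))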
@inline vsum(vu::VecUnroll{N,W,T,V}) where {N,W,T,V<:AbstractSIMDVector{W,T}} =
VecUnroll(fmap(vsum, data(vu)))::VecUnroll{N,1,T,T}
@inline vsum(s::VecUnroll, vu::VecUnroll) =
VecUnroll(fmap(vsum, data(s), data(vu)))
@inline vprod(vu::VecUnroll) = VecUnroll(fmap(vprod, data(vu)))
@inline vprod(s::VecUnroll, vu::VecUnroll) =
VecUnroll(fmap(vprod, data(s), data(vu)))
@inline vmaximum(vu::VecUnroll) = VecUnroll(fmap(vmaximum, data(vu)))
@inline vminimum(vu::VecUnroll) = VecUnroll(fmap(vminimum, data(vu)))
@inline vall(vu::VecUnroll) = VecUnroll(fmap(vall, data(vu)))
@inline vany(vu::VecUnroll) = VecUnroll(fmap(vany, data(vu)))
@inline collapse_add(x) = x
@inline collapse_mul(x) = x
@inline collapse_max(x) = x
@inline collapse_min(x) = x
@inline collapse_and(x) = x
@inline collapse_or(x) = x
# @inline vsum(vu::VecUnroll) = vsum(collapse_add(vu))
# @inline vsum(s, vu::VecUnroll) = vsum(s, collapse_add(vu))
# @inline vprod(vu::VecUnroll) = vprod(collapse_mul(vu))
# @inline vprod(s, vu::VecUnroll) = vprod(s, collapse_mul(vu))
# @inline vmaximum(vu::VecUnroll) = vmaximum(collapse_max(vu))
# @inline vminimum(vu::VecUnroll) = vminimum(collapse_min(vu))
# @inline vall(vu::VecUnroll) = vall(collapse_and(vu))
# @inline vany(vu::VecUnroll) = vany(collapse_or(vu))
@inline _maybefirst(x) = x
@inline _maybefirst(x::VecUnroll) = first(data(x))
@inline _maybetail(x) = x
@inline _maybetail(::VecUnroll{0}) = ()
@inline _maybetail(x::VecUnroll) = VecUnroll(Base.tail(data(x)))
@inline _vload_map(_, ::Tuple, ::Tuple{}, __, ___) = ()
@inline function _vload_map(p, i, m, ::J, ::A) where {J,A}
x = _vload(p, map(_maybefirst, i), first(m), J(), A())
r = _vload_map(p, map(_maybetail, i), Base.tail(m), J(), A())
(x, r...)
end
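# Editor's note: `_vload_map` recurses through the tuple of per-unroll masks,
# using `_maybefirst`/`_maybetail` to peel one constituent off any `VecUnroll`
# index, so each unrolled load sees a plain index and its own `Bit` mask.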
@inline function _vload(
p::AbstractStridedPointer,
i::Tuple{
Vararg{Union{IntegerIndex,MM,VecUnroll{N,<:Any,<:Any,<:IntegerIndex}}}
},
m::VecUnroll{N,<:Any,Bit},
::J,
::A
) where {N,J,A}
VecUnroll(_vload_map(p, i, data(m), J(), A()))
end
# unroll
@inline Base.Broadcast.broadcastable(u::Unroll) = (u,)
"""
Returns a vector of expressions for a set of unrolled indices.
"""
function unrolled_indicies(
D::Int,
AU::Int,
F::Int,
N::Int,
AV::Int,
W::Int,
X::Int
)
baseind = Expr(:tuple)
for d = 1:D
i = Expr(:call, :Zero)
if d == AV && W > 1
i = Expr(:call, Expr(:curly, :MM, W, X), i)
end
push!(baseind.args, i)
end
inds = Vector{Expr}(undef, N)
inds[1] = baseind
for n = 1:N-1
ind = copy(baseind)
i = Expr(:call, Expr(:curly, :StaticInt, n * F))
if AU == AV && W > 1
i = Expr(:call, Expr(:curly, :MM, W, X), i)
end
ind.args[AU] = i
inds[n+1] = ind
end
inds
end
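# Worked example (editor's illustration): `unrolled_indicies(2, 1, 1, 3, 2, 8, 1)`
# yields index tuples for three unrolled accesses stepping along axis 1 while
# vectorizing axis 2 with 8-wide vectors:
#   (Zero(),         MM{8,1}(Zero()))
#   (StaticInt{1}(), MM{8,1}(Zero()))
#   (StaticInt{2}(), MM{8,1}(Zero()))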
# This creates a generic expression that simply calls `vload` for each of the specified `Unroll`s without any fanciness.
function vload_unroll_quote(
D::Int,
AU::Int,
F::Int,
N::Int,
AV::Int,
W::Int,
M::UInt,
X::Int,
mask::Bool,
align::Bool,
rs::Int,
vecunrollmask::Bool
)
t = Expr(:tuple)
inds = unrolled_indicies(D, AU, F, N, AV, W, X)
# TODO: Consider doing some alignment checks before accepting user's `align`?
alignval = Expr(:call, align ? :True : :False)
rsexpr = Expr(:call, Expr(:curly, :StaticInt, rs))
q = quote
$(Expr(:meta, :inline))
gptr = similar_no_offset(sptr, gep(pointer(sptr), data(u)))
end
vecunrollmask && push!(q.args, :(masktup = data(vm)))
gf = GlobalRef(Core, :getfield)
for n = 1:N
l = Expr(:call, :_vload, :gptr, inds[n])
if vecunrollmask
push!(l.args, :($gf(masktup, $n, false)))
elseif mask && (M % Bool)
push!(l.args, :sm)
end
push!(l.args, alignval, rsexpr)
M >>= 1
push!(t.args, l)
end
push!(q.args, :(VecUnroll($t)))
q
end
# so I could call `linear_index`, then
# `IT, ind_type, W, X, M, O = index_summary(I)`
# `gesp` to set offset multiplier (`M`) and offset (`O`) to `0`.
# call, to build extended load quote (`W` below is `W*N`):
# vload_quote_llvmcall(
# T_sym::Symbol, I_sym::Symbol, ind_type::Symbol, W::Int, X::Int, M::Int, O::Int, mask::Bool, align::Bool, rs::Int, ret::Expr
# )
function interleave_memory_access(AU, C, F, X, UN, size_T, B)
((((AU == C) && (C > 0)) && (F == 1)) && (abs(X) == (UN * size_T)) && (B < 1))
end
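# Editor's note: this predicate holds when unrolling steps one-by-one (`F == 1`)
# along the contiguous axis (`AU == C > 0`), there is no batch axis (`B < 1`),
# and the byte stride between vector elements spans exactly the interleaved
# group, e.g. `abs(X) == 32` for `UN = 4` interleaved `Float64` fields.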
# if either
# x = rand(3,L);
# foo(x[1,i],x[2,i],x[3,i])
# `(AU == 1) & (AV == 2) & (F == 1) & (stride(p,2) == N)`
# or
# x = rand(3L);
# foo(x[3i - 2], x[3i - 1], x[3i ])
# Index would be `(MM{W,3}(1),)`
# so we have `AU == AV == 1`, but also `X == N == F`.
function shuffle_load_quote(
::Type{T},
integer_params::NTuple{9,Int},
::Type{I},
align::Bool,
rs::Int,
MASKFLAG::UInt
) where {T,I}
Sys.CPU_NAME === "znver1" && return nothing
IT, ind_type, _W, _X, M, O = index_summary(I)
size_T = sizeof(T)
T_sym = JULIA_TYPES[T]
I_sym = JULIA_TYPES[IT]
_shuffle_load_quote(
T_sym,
size_T,
integer_params,
I_sym,
ind_type,
M,
O,
align,
rs,
MASKFLAG
)
end
function _shuffle_load_quote(
T_sym::Symbol,
size_T::Int,
integer_params::NTuple{9,Int},
I_sym::Symbol,
ind_type::Symbol,
M::Int,
O::Int,
align::Bool,
rs::Int,
MASKFLAG::UInt
)
N, C, B, AU, F, UN, AV, W, X = integer_params
# we don't require vector indices for `Unroll`s...
# @assert _W == W "W from index $(_W) didn't equal W from Unroll $W."
mask = MASKFLAG ≠ zero(UInt)
if mask && (
(MASKFLAG & ((one(UInt) << UN) - one(UInt))) ≠
((one(UInt) << UN) - one(UInt))
)
return nothing
# throw(ArgumentError("`shuffle_load_quote` currently requires masking either all or none of the unrolled loads."))
end
if mask && Base.libllvm_version < v"11"
return nothing
end
  # We need to unroll in a contiguous dimension for this to be a shuffle load, and we need the step between the start of the vectors to be `1`
# @show X, UN, size_T
((AV > 0) && interleave_memory_access(AU, C, F, X, UN, size_T, B)) ||
return nothing
Wfull = W * UN
(mask && (Wfull > 128)) && return nothing
# `X` is stride between indices, e.g. `X = 3` means our final vectors should be `<x[0], x[3], x[6], x[9]>`
# We need `X` to equal the steps (the unrolling factor)
vloadexpr = vload_quote_llvmcall(
T_sym,
I_sym,
ind_type,
Wfull,
size_T,
M,
O,
mask,
align,
rs,
:(_Vec{$Wfull,$T_sym})
)
q = quote
$(Expr(:meta, :inline))
ptr = pointer(sptr)
i = data(u)
end
X < 0 && push!(q.args, :(ptr -= $(size_T * (UN * (W - 1)))))
  if mask
    # Masked shuffle loads are currently disabled (see the FIXME below); fall
    # back to the generic unrolled load path.
    return nothing
if X > 0
mask_expr = :(mask(StaticInt{$W}(), 0, vmul_nw($UN, getfield(sm, :evl))))
for n ∈ 1:UN-1
mask_expr = :(vcat(
$mask_expr,
mask(StaticInt{$W}(), $(n * W), vmul_nw($UN, getfield(sm, :evl)))
))
end
# push!(q.args, :(m = mask(StaticInt{$Wfull}(), vmul_nw($UN, getfield(sm, :evl)))))
else
# FIXME
return nothing
vrange = :(VectorizationBase.vrange(
Val{$W}(),
$(integer_of_bytes(min(size_T, rs ÷ W))),
Val{0}(),
Val{-1}()
))
mask_expr = :(($vrange + $(UN * W)) ≤ vmul_nw($UN, getfield(sm, :evl)))
for n ∈ UN-1:-1:1
mask_expr = :(vcat(
$mask_expr,
($vrange + $(n * W)) ≤ vmul_nw($UN, getfield(sm, :evl))
))
end
end
push!(q.args, :(m = $mask_expr))
end
push!(q.args, :(v = $vloadexpr))
vut = Expr(:tuple)
Wrange = X > 0 ? (0:1:W-1) : (W-1:-1:0)
for n ∈ 0:UN-1
shufftup = Expr(:tuple)
for w ∈ Wrange
push!(shufftup.args, n + UN * w)
end
push!(vut.args, :(shufflevector(v, Val{$shufftup}())))
end
push!(q.args, Expr(:call, :VecUnroll, vut))
q
end
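# Worked example (editor's illustration): loading `UN = 2` interleaved fields
# with `W = 4` and `X > 0` issues one contiguous load of `Wfull = 8` elements,
# `v = <a1,b1,a2,b2,a3,b3,a4,b4>`, then de-interleaves it:
#   shufflevector(v, Val{(0, 2, 4, 6)}())  # <a1,a2,a3,a4>
#   shufflevector(v, Val{(1, 3, 5, 7)}())  # <b1,b2,b3,b4>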
function init_transpose_memop_masking!(q::Expr, M::UInt, N::Int, evl::Bool)
domask = M ≠ zero(UInt)
if domask
if (M & ((one(UInt) << N) - one(UInt))) ≠ ((one(UInt) << N) - one(UInt))
throw(
ArgumentError(
"`vload_transpose_quote` currently requires masking either all or none of the unrolled loads."
)
)
end
if evl
push!(q.args, :(_evl = getfield(sm, :evl)))
else
push!(q.args, :(u_1 = getfield(sm, :u)))
end
end
domask
end
function push_transpose_mask!(
q::Expr,
mq::Expr,
domask::Bool,
n::Int,
npartial::Int,
w::Int,
W::Int,
evl::Bool,
RS::Int,
mask::UInt
)
Utyp = mask_type_symbol(n)
if domask
mw_w = Symbol(:mw_, w)
if evl
mm_evl_cmp = Symbol(:mm_evl_cmp_, n)
if w == 1
isym = integer_of_bytes_symbol(min(4, RS ÷ n))
vmmtyp =
:(VectorizationBase._vrange(Val{$n}(), $isym, Val{0}(), Val{1}()))
push!(q.args, :($mm_evl_cmp = $vmmtyp))
push!(q.args, :($mw_w = vmul_nw(_evl, $(UInt32(n))) > $mm_evl_cmp))
else
push!(
q.args,
:(
$mw_w =
(
vsub_nsw(vmul_nw(_evl, $(UInt32(n))), $(UInt32(n * (w - 1)))) %
Int32
) > ($mm_evl_cmp)
)
)
end
if n == npartial
push!(mq.args, mw_w)
else
push!(mq.args, :(Mask{$n}($mask % $Utyp) & $mw_w))
end
else
push!(
q.args,
:(
$mw_w =
Core.ifelse($(Symbol(:u_, w)) % Bool, $mask % $Utyp, zero($Utyp))
)
)
if w < W
push!(
q.args,
Expr(
:(=),
Symbol(:u_, w + 1),
Expr(:call, :(>>>), Symbol(:u_, w), 1)
)
)
end
push!(mq.args, :(Mask{$n}($mw_w)))
end
elseif n ≠ npartial
push!(mq.args, :(Mask{$n}($mask % $Utyp)))
end
nothing
end
function vload_transpose_quote(
D::Int,
AU::Int,
F::Int,
N::Int,
AV::Int,
W::Int,
X::Int,
align::Bool,
RS::Int,
st::Int,
M::UInt,
evl::Bool
)
ispow2(W) ||
throw(ArgumentError("Vector width must be a power of 2, but recieved $W."))
isone(F) || throw(
ArgumentError(
"No point to doing a transposed store if unroll step factor $F != 1"
)
)
  C = AU # the point of transposing
q = Expr(
:block,
Expr(:meta, :inline),
:(gptr = similar_no_offset(sptr, gep(pointer(sptr), data(u))))
)
domask = init_transpose_memop_masking!(q, M, N, evl)
alignval = Expr(:call, align ? :True : :False)
rsexpr = Expr(:call, Expr(:curly, :StaticInt, RS))
vut = Expr(:tuple)
# vds = Vector{Symbol}(undef, N)
# for n ∈ 1:N
# vds[n] = vdn = Symbol(:vd_,n)
# push!(q.args, :($vdn = getfield(vd, $n, false)))
# end
# AU = 1, AV = 2, N = 3, W = 8, M = 0x7 (<1,1,1,0>), mask unknown, hypothetically 0x1f <1 1 1 1 1 0 0 0>
  # load 5 vectors of length 3, replace last 3 with undef
i = 0
Wmax = RS ÷ st
while N > 0
# for store, we do smaller unrolls first.
# for load, we start with larger unrolls.
# The idea is that this is probably a better order to make use of execution resources.
# For load, we want to begin immediately issuing loads, but then we'd also like to do other work, e.g. shuffles at the same time.
  # Before issuing the loads, register pressure is also probably low, and it will be higher towards the end. Another reason to do transposes early.
  # For stores, we also want to begin immediately issuing stores, so we start with smaller unrolls so that there is less work
# to do beforehand. This also immediately frees up registers for use while transposing, in case of register pressure.
if N ≥ Wmax
npartial = n = Wmax
mask = -1 % UInt
else
npartial = N
n = nextpow2(npartial)
# if npartial == n
# mask = -1 % UInt
# else
mask = (one(UInt) << (npartial)) - one(UInt)
# end
end
N -= npartial
if n == 1
# this can only happen on the first iter, so `StaticInt{0}()` is fine
ind = Expr(:tuple)
for d ∈ 1:D
if AV == d
push!(ind.args, :(MM{$W,$X}(StaticInt{0}())))
elseif AU == d
push!(ind.args, :(StaticInt{$i}()))
else
push!(ind.args, :(StaticInt{0}()))
end
end
loadq = :(_vload(gptr, $ind))
# we're not shuffling when `n == 1`, so we just push the mask
domask && push!(loadq.args, :sm)
push!(loadq.args, alignval, rsexpr)
push!(q.args, :(vl_1 = $loadq))
push!(vut.args, :vl_1)
elseif W == 1
loadq = loadq_expr!(
q,
D,
AU,
AV,
n,
i,
X,
W,
W,
domask,
npartial,
evl,
RS,
mask,
alignval,
rsexpr
)
loadsym = Symbol(:vloadw1_, i, :_, n)
push!(q.args, Expr(:(=), loadsym, loadq))
for nn ∈ 1:npartial
push!(vut.args, :(extractelement($loadsym, $(nn - 1))))
end
else
# dname is a `VecUnroll{(W-1),N}`
t = Expr(:tuple)
dname = Symbol(:vud_, i, :_, n)
for w ∈ 1:W
# if domask, these get masked
loadq = loadq_expr!(
q,
D,
AU,
AV,
n,
i,
X,
w,
W,
domask,
npartial,
evl,
RS,
mask,
alignval,
rsexpr
)
push!(t.args, loadq)
end
push!(q.args, :($dname = data(transpose_vecunroll(VecUnroll($t)))))
for nn ∈ 1:npartial
extract = :(getfield($dname, $nn, false))
push!(vut.args, extract)
end
end
# M >>>= 1
i += npartial
end
push!(q.args, :(VecUnroll($vut)))
q
end
function loadq_expr!(
q,
D,
AU,
AV,
n,
i,
X,
w,
W,
domask,
npartial,
evl,
RS,
mask,
alignval,
rsexpr
)
ind = Expr(:tuple)
for d ∈ 1:D
if AU == d
push!(ind.args, :(MM{$n}(StaticInt{$i}())))
elseif AV == d
push!(ind.args, :(StaticInt{$(X * (w - 1))}()))
else
push!(ind.args, :(StaticInt{0}()))
end
end
# transposing mask does what?
loadq = :(_vload(gptr, $ind))
push_transpose_mask!(q, loadq, domask, n, npartial, w, W, evl, RS, mask)
push!(loadq.args, alignval, rsexpr)
loadq
end
# @inline staticunrolledvectorstride(_, __) = nothing
# @inline staticunrolledvectorstride(::StaticInt{M}, ::StaticInt{X}) where {M,X} = StaticInt{M}() * StaticInt{X}()
# @inline staticunrolledvectorstride(sp::AbstractStridedPointer, ::Unroll{AU,F,UN,AV,W,M,X}) where {AU,F,UN,AV,W,M,X} = staticunrolledvectorstride(static_strides(ptr)[AV], StaticInt{X}())
@generated function staticunrolledvectorstride(
sptr::T,
::Unroll{AU,F,UN,AV,W,M,X}
) where {T,AU,F,UN,AV,W,M,X}
(0 < AV ≤ length(T.parameters)) || return nothing
SM = T.parameters[AV]
if SM <: StaticInt
return Expr(
:block,
Expr(:meta, :inline),
Expr(:call, *, Expr(:call, SM), Expr(:call, Expr(:curly, :StaticInt, X)))
)
else
return nothing
end
end
function should_transpose_memop(F, C, AU, AV, UN, M)
(F == 1) & (C == AU) & (C ≠ AV) || return false
max_mask = (one(UInt) << UN) - one(UInt)
(M == zero(UInt)) | ((max_mask & M) == max_mask)
end
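# Editor's note: transposed memory ops pay off when stepping one-by-one
# (`F == 1`) along the contiguous axis (`C == AU`) while vectorizing a different
# axis (`C ≠ AV`), provided the mask skips none or covers all `UN` accesses,
# e.g. `should_transpose_memop(1, 1, 1, 2, 4, 0x0f)` is `true`.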
function bitload(AU::Int, W::Int, AV::Int, F::Int, UN::Int, RS::Int, mask::Bool)
if AU ≠ AV
1 < W < 8 && throw(
ArgumentError(
"Must unroll in vectorized direction for `Bit` loads with W < 8."
)
)
return
end
if (1 < W < 8) && F ≠ W
throw(
ArgumentError(
"Must take steps of size $W along unrolled and vectorized axis when loading from bits."
)
)
end
loadq = :(__vload(pointer(sptr), MM{$(W * UN)}(ind)))
mask && push!(loadq.args, :sm)
push!(loadq.args, :(False()), :(StaticInt{$RS}()))
quote
$(Expr(:meta, :inline))
ind = getfield(u, :i)
VecUnroll(splitvectortotuple(StaticInt{$UN}(), StaticInt{$W}(), $loadq))
end
end
@generated function _vload_unroll(
sptr::AbstractStridedPointer{T,N,C,B},
u::Unroll{AU,F,UN,AV,W,M,UX,I},
::A,
::StaticInt{RS},
::StaticInt{X}
) where {
T<:NativeTypes,
N,
C,
B,
AU,
F,
UN,
AV,
W,
M,
UX,
I<:IndexNoUnroll,
A<:StaticBool,
RS,
X
}
if T === Bit
bitlq = bitload(AU, W, AV, F, UN, RS, false)
bitlq === nothing || return bitlq
end
align = A === True
should_transpose =
T !== Bit && should_transpose_memop(F, C, AU, AV, UN, zero(UInt))
if (W == N) & ((sizeof(T) * W) == RS) & should_transpose
return vload_transpose_quote(
N,
AU,
F,
UN,
AV,
W,
UX,
align,
RS,
sizeof(T),
zero(UInt),
false
)
end
maybeshufflequote = shuffle_load_quote(
T,
(N, C, B, AU, F, UN, AV, W, X),
I,
align,
RS,
zero(UInt)
)
maybeshufflequote === nothing || return maybeshufflequote
if should_transpose
vload_transpose_quote(
N,
AU,
F,
UN,
AV,
W,
UX,
align,
RS,
sizeof(T),
zero(UInt),
false
)
else
vload_unroll_quote(N, AU, F, UN, AV, W, M, UX, false, align, RS, false)
end
end
@generated function _vload_unroll(
sptr::AbstractStridedPointer{T,N,C,B},
u::Unroll{AU,F,UN,AV,W,M,UX,I},
::A,
::StaticInt{RS},
::Nothing
) where {
T<:NativeTypes,
N,
C,
B,
AU,
F,
UN,
AV,
W,
M,
UX,
I<:IndexNoUnroll,
A<:StaticBool,
RS
}
if T === Bit
bitlq = bitload(AU, W, AV, F, UN, RS, false)
bitlq === nothing || return bitlq
end
should_transpose =
T !== Bit && should_transpose_memop(F, C, AU, AV, UN, zero(UInt))
if should_transpose
vload_transpose_quote(
N,
AU,
F,
UN,
AV,
W,
UX,
A === True,
RS,
sizeof(T),
zero(UInt),
false
)
else
vload_unroll_quote(N, AU, F, UN, AV, W, M, UX, false, A === True, RS, false)
end
end
@generated function _vload_unroll(
sptr::AbstractStridedPointer{T,D,C,B},
u::Unroll{AU,F,N,AV,W,M,UX,I},
sm::EVLMask{W},
::A,
::StaticInt{RS},
::StaticInt{X}
) where {A<:StaticBool,AU,F,N,AV,W,M,I<:IndexNoUnroll,T,D,RS,UX,X,C,B}
if T === Bit
bitlq = bitload(AU, W, AV, F, N, RS, true)
bitlq === nothing || return bitlq
end
should_transpose = T !== Bit && should_transpose_memop(F, C, AU, AV, N, M)
align = A === True
if (W == N) & ((sizeof(T) * W) == RS) & should_transpose
return vload_transpose_quote(
D,
AU,
F,
N,
AV,
W,
UX,
align,
RS,
sizeof(T),
M,
true
)
end
# maybeshufflequote = shuffle_load_quote(T, (D, C, B, AU, F, N, AV, W, X), I, align, RS, M)
# maybeshufflequote === nothing || return maybeshufflequote
if should_transpose
return vload_transpose_quote(
D,
AU,
F,
N,
AV,
W,
UX,
align,
RS,
sizeof(T),
M,
true
)
end
vload_unroll_quote(D, AU, F, N, AV, W, M, UX, true, align, RS, false)
end
@generated function _vload_unroll(
sptr::AbstractStridedPointer{T,D,C},
u::Unroll{AU,F,N,AV,W,M,UX,I},
sm::AbstractMask{W},
::A,
::StaticInt{RS},
::Any
) where {A<:StaticBool,AU,F,N,AV,W,M,I<:IndexNoUnroll,T,D,RS,UX,C}
if T === Bit
bitlq = bitload(AU, W, AV, F, N, RS, true)
bitlq === nothing || return bitlq
end
align = A === True
if T !== Bit && should_transpose_memop(F, C, AU, AV, N, M)
isevl = sm <: EVLMask
return vload_transpose_quote(
D,
AU,
F,
N,
AV,
W,
UX,
align,
RS,
sizeof(T),
M,
isevl
)
end
vload_unroll_quote(D, AU, F, N, AV, W, M, UX, true, align, RS, false)
end
# @generated function _vload_unroll(
# sptr::AbstractStridedPointer{T,D}, u::Unroll{AU,F,N,AV,W,M,UX,I}, vm::VecUnroll{Nm1,W,B}, ::A, ::StaticInt{RS}, ::StaticInt{X}
# ) where {A<:StaticBool,AU,F,N,AV,W,M,I<:IndexNoUnroll,T,D,RS,UX,Nm1,B<:Union{Bool,Bit},X}
# Nm1+1 == N || throw(ArgumentError("Nm1 + 1 = $(Nm1 + 1) ≠ $N = N"))
# maybeshufflequote = shuffle_load_quote(T, (N, C, B, AU, F, UN, AV, W, X), I, align, RS, 2)
# maybeshufflequote === nothing || return maybeshufflequote
# vload_unroll_quote(D, AU, F, N, AV, W, M, UX, true, A === True, RS, true)
# end
@generated function _vload_unroll(
sptr::AbstractStridedPointer{T,D},
u::Unroll{AU,F,N,AV,W,M,UX,I},
vm::VecUnroll{Nm1,<:Any,<:Union{Bool,Bit}},
::A,
::StaticInt{RS},
::Any
) where {A<:StaticBool,AU,F,N,AV,W,M,I<:IndexNoUnroll,T,D,RS,UX,Nm1}
Nm1 + 1 == N || throw(ArgumentError("Nm1 + 1 = $(Nm1 + 1) ≠ $N = N"))
vload_unroll_quote(D, AU, F, N, AV, W, M, UX, true, A === True, RS, true)
end
@inline function _vload(
ptr::AbstractStridedPointer,
u::Unroll,
::A,
::StaticInt{RS}
) where {A<:StaticBool,RS}
p, li = linear_index(ptr, u)
sptr = similar_no_offset(ptr, p)
_vload_unroll(
sptr,
li,
A(),
StaticInt{RS}(),
staticunrolledvectorstride(static_strides(ptr), u)
)
end
@inline function _vload(
ptr::AbstractStridedPointer,
u::Unroll,
m::AbstractMask,
::A,
::StaticInt{RS}
) where {A<:StaticBool,RS}
p, li = linear_index(ptr, u)
sptr = similar_no_offset(ptr, p)
_vload_unroll(
sptr,
li,
m,
A(),
StaticInt{RS}(),
staticunrolledvectorstride(static_strides(ptr), u)
)
end
@inline function _vload(
ptr::AbstractStridedPointer,
u::Unroll,
m::VecUnroll{Nm1,W,B},
::A,
::StaticInt{RS}
) where {A<:StaticBool,RS,Nm1,W,B<:Union{Bool,Bit}}
p, li = linear_index(ptr, u)
sptr = similar_no_offset(ptr, p)
_vload_unroll(
sptr,
li,
m,
A(),
StaticInt{RS}(),
staticunrolledvectorstride(static_strides(ptr), u)
)
end
@inline function _vload(
ptr::AbstractStridedPointer{T},
u::Unroll{AU,F,N,AV,W},
m::Bool,
::A,
::StaticInt{RS}
) where {A<:StaticBool,RS,AU,F,N,AV,W,T}
if m
_vload(ptr, u, A(), StaticInt{RS}())
else
zero_vecunroll(StaticInt{N}(), StaticInt{W}(), T, StaticInt{RS}())
end
end
@generated function vload(
r::FastRange{T},
u::Unroll{AU,F,N,AV,W,M,X,I}
) where {AU,F,N,AV,W,M,X,I,T}
_vload_fastrange_unroll(AU, F, N, AV, W, M, X, false, false)
end
@generated function vload(
r::FastRange{T},
u::Unroll{AU,F,N,AV,W,M,X,I},
m::AbstractMask
) where {AU,F,N,AV,W,M,X,I,T}
_vload_fastrange_unroll(AU, F, N, AV, W, M, X, true, false)
end
@generated function vload(
r::FastRange{T},
u::Unroll{AU,F,N,AV,W,M,X,I},
vm::VecUnroll{Nm1,<:Any,<:Union{Bool,Bit}}
) where {AU,F,N,AV,W,M,X,I,T,Nm1}
Nm1 + 1 == N || throw(ArgumentError("Nm1 + 1 = $(Nm1 + 1) ≠ $N = N"))
_vload_fastrange_unroll(AU, F, N, AV, W, M, X, false, true)
end
function vstore_unroll_quote(
D::Int,
AU::Int,
F::Int,
N::Int,
AV::Int,
W::Int,
M::UInt,
X::Int,
mask::Bool,
align::Bool,
noalias::Bool,
nontemporal::Bool,
rs::Int,
vecunrollmask::Bool
)
t = Expr(:tuple)
inds = unrolled_indicies(D, AU, F, N, AV, W, X)
q = quote
$(Expr(:meta, :inline))
gptr = similar_no_offset(sptr, gep(pointer(sptr), data(u)))
# gptr = gesp(sptr, getfield(u, :i))
t = data(vu)
end
alignval = Expr(:call, align ? :True : :False)
noaliasval = Expr(:call, noalias ? :True : :False)
nontemporalval = Expr(:call, nontemporal ? :True : :False)
rsexpr = Expr(:call, Expr(:curly, :StaticInt, rs))
if vecunrollmask
push!(q.args, :(masktup = data(vm)))
end
gf = GlobalRef(Core, :getfield)
for n = 1:N
l = Expr(:call, :_vstore!, :gptr, Expr(:call, gf, :t, n, false), inds[n])
if vecunrollmask
push!(l.args, :($gf(masktup, $n, false)))
elseif mask && (M % Bool)
push!(l.args, :sm)
end
push!(l.args, alignval, noaliasval, nontemporalval, rsexpr)
M >>= 1
push!(q.args, l)
end
q
end
function shuffle_store_quote(
::Type{T},
integer_params::NTuple{9,Int},
::Type{I},
align::Bool,
alias::Bool,
notmp::Bool,
rs::Int,
mask::Bool
) where {T,I}
Sys.CPU_NAME === "znver1" && return nothing
IT, ind_type, _W, _X, M, O = index_summary(I)
T_sym = JULIA_TYPES[T]
I_sym = JULIA_TYPES[IT]
size_T = sizeof(T)
_shuffle_store_quote(
T_sym,
size_T,
integer_params,
I_sym,
ind_type,
M,
O,
align,
alias,
notmp,
rs,
mask
)
end
function _shuffle_store_quote(
T_sym::Symbol,
size_T::Int,
integer_params::NTuple{9,Int},
I_sym::Symbol,
ind_type::Symbol,
M::Int,
O::Int,
align::Bool,
alias::Bool,
notmp::Bool,
rs::Int,
mask::Bool
)
N, C, B, AU, F, UN, AV, W, X = integer_params
W == 1 && return nothing
# we don't require vector indices for `Unroll`s...
# @assert _W == W "W from index $(_W) didn't equal W from Unroll $W."
# We need to unroll in a contiguous dimension for this to be a shuffle store, and we need the step between the start of the vectors to be `1`
interleave_memory_access(AU, C, F, X, UN, size_T, B) || return nothing
(mask && (Base.libllvm_version < v"11")) && return nothing
# `X` is stride between indices, e.g. `X = 3` means our final vectors should be `<x[0], x[3], x[6], x[9]>`
# We need `X` to equal the steps (the unrolling factor)
Wfull = W * UN
(mask && (Wfull > 128)) && return nothing
# the approach for combining is to keep concatenating vectors to double their length
# until we hit ≥ half Wfull, then we `vresize` the remainder, and shuffle in the final combination before storing.
# mask = false
vstoreexpr = vstore_quote(
T_sym,
I_sym,
ind_type,
Wfull,
size_T,
M,
O,
mask,
align,
alias,
notmp,
rs
)
q = quote
$(Expr(:meta, :inline))
ptr = pointer(sptr)
t = data(vu)
i = data(u)
end
X < 0 && push!(q.args, :(ptr -= $(size_T * (UN * (W - 1)))))
syms = Vector{Symbol}(undef, UN)
gf = GlobalRef(Core, :getfield)
for n ∈ 1:UN
syms[n] = vs = Symbol(:v_, n)
push!(q.args, Expr(:(=), vs, Expr(:call, gf, :t, n)))
end
Wtemp = W
Nvec = UN
# first, we start concatenating vectors
while 2Wtemp < Wfull
Wnext = 2Wtemp
Nvecnext = (Nvec >>> 1)
for n ∈ 1:Nvecnext
v1 = syms[2n-1]
v2 = syms[2n]
vc = Symbol(v1, :_, v2)
push!(q.args, Expr(:(=), vc, Expr(:call, :vcat, v1, v2)))
syms[n] = vc
end
if isodd(Nvec)
syms[Nvecnext+1] = syms[Nvec]
Nvec = Nvecnext + 1
else
Nvec = Nvecnext
end
Wtemp = Wnext
end
shufftup = Expr(:tuple)
for w ∈ ((X > 0) ? (0:1:W-1) : (W-1:-1:0))
for n ∈ 0:UN-1
push!(shufftup.args, W * n + w)
end
end
mask &&
push!(q.args, :(m = mask(StaticInt{$Wfull}(), vmul_nw($UN, $gf(sm, :evl)))))
push!(
q.args,
Expr(
:(=),
:v,
Expr(
:call,
:shufflevector,
syms[1],
syms[2],
Expr(:call, Expr(:curly, :Val, shufftup))
)
)
)
push!(q.args, vstoreexpr)
q
end
function sparse_index_tuple(N, d, o)
t = Expr(:tuple)
for n ∈ 1:N
if n == d
push!(t.args, :(StaticInt{$o}()))
else
push!(t.args, :(StaticInt{0}()))
end
end
t
end
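# Worked example (editor's illustration): `sparse_index_tuple(3, 2, 5)` returns
# the static index expression `:((StaticInt{0}(), StaticInt{5}(), StaticInt{0}()))`,
# i.e. an offset of 5 along axis 2 of a rank-3 pointer.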
function vstore_transpose_quote(
D,
AU,
F,
N,
AV,
W,
X,
align,
alias,
notmp,
RS,
st,
Tsym,
M,
evl
)
ispow2(W) ||
throw(ArgumentError("Vector width must be a power of 2, but recieved $W."))
isone(F) || throw(
ArgumentError(
"No point to doing a transposed store if unroll step factor $F != 1"
)
)
  C = AU # the point of transposing
q = Expr(
:block,
Expr(:meta, :inline),
:(vd = data(vu)),
:(gptr = similar_no_offset(sptr, gep(pointer(sptr), data(u))))
)
alignval = Expr(:call, align ? :True : :False)
aliasval = Expr(:call, alias ? :True : :False)
notmpval = Expr(:call, notmp ? :True : :False)
rsexpr = Expr(:call, Expr(:curly, :StaticInt, RS))
domask = init_transpose_memop_masking!(q, M, N, evl)
vds = Vector{Symbol}(undef, N)
for n ∈ 1:N
vds[n] = vdn = Symbol(:vd_, n)
push!(q.args, :($vdn = getfield(vd, $n, false)))
end
i = 0
  # Decompose the unroll amount `N` into chunks of at most `Wmax = RS ÷ st`
  # vectors; a trailing partial chunk is rounded up to the next power of 2 and
  # padded with `undef` vectors.
Wmax = RS ÷ st
while N > 0
r = N % Wmax
if r == 0
npartial = n = Wmax
mask = ~zero(UInt)
else
npartial = r
n = nextpow2(npartial)
if npartial == n
mask = ~zero(UInt)
else
mask = (one(UInt) << (npartial)) - one(UInt)
end
end
N -= npartial
if n == 1
# this can only happen on the first iter, so `StaticInt{0}()` is fine
ind = Expr(:tuple)
for d ∈ 1:D
if AV == d
push!(ind.args, :(MM{$W,$X}(StaticInt{0}())))
else
push!(ind.args, :(StaticInt{0}()))
end
end
storeq = :(_vstore!(gptr, $(vds[1]), $ind))
domask && push!(storeq.args, :sm)
push!(storeq.args, alignval, aliasval, notmpval, rsexpr)
push!(q.args, storeq)
# elseif n < W
# elseif n == W
else
t = Expr(:tuple)
for nn ∈ 1:npartial
push!(t.args, vds[i+nn])
end
for nn ∈ npartial+1:n
# if W == 1
# push!(t.args, :(zero($Tsym)))
# else
push!(t.args, :(_vundef(StaticInt{$W}(), $Tsym)))
# end
end
dname = Symbol(:vud_, i, :_, n)
if W == 1
push!(q.args, :($dname = transpose_vecunroll(VecUnroll($t))))
else
push!(q.args, :($dname = data(transpose_vecunroll(VecUnroll($t)))))
end
# dname is a `VecUnroll{(W-1),N}`
for w ∈ 1:W
ind = Expr(:tuple)
for d ∈ 1:D
if AU == d
push!(ind.args, :(MM{$n}(StaticInt{$i}())))
elseif AV == d
push!(ind.args, :(StaticInt{$(X * (w - 1))}()))
else
push!(ind.args, :(StaticInt{0}()))
end
end
# transposing mask does what?
storeq = if W == 1
:(_vstore!(gptr, $dname, $ind))
else
:(_vstore!(gptr, getfield($dname, $w, false), $ind))
end
push_transpose_mask!(
q,
storeq,
domask,
n,
npartial,
w,
W,
evl,
RS,
mask
)
push!(storeq.args, alignval, aliasval, notmpval, rsexpr)
push!(q.args, storeq)
end
end
# M >>>= 1
i += npartial
end
# @show
q
end
@generated function _vstore_unroll!(
sptr::AbstractStridedPointer{T,D,C,B},
vu::VecUnroll{Nm1,W,VUT,<:VecOrScalar},
u::Unroll{AU,F,N,AV,W,M,UX,I},
::A,
::S,
::NT,
::StaticInt{RS},
::StaticInt{X}
) where {
AU,
F,
N,
AV,
W,
M,
I<:IndexNoUnroll,
T,
D,
Nm1,
S<:StaticBool,
A<:StaticBool,
NT<:StaticBool,
RS,
C,
B,
UX,
X,
VUT
}
N == Nm1 + 1 || throw(
ArgumentError(
"The unrolled index specifies unrolling by $N, but sored `VecUnroll` is unrolled by $(Nm1+1)."
)
)
VUT === T || return Expr(
:block,
Expr(:meta, :inline),
:(_vstore_unroll!(
sptr,
vconvert($T, vu),
u,
$(A()),
$(S()),
$(NT()),
$(StaticInt(RS)),
$(StaticInt(X))
))
)
if (T === Bit) && (F == W < 8) && (UX == 1) && (AV == AU == C > 0)
return quote
$(Expr(:meta, :inline))
__vstore!(
pointer(sptr),
vu,
MM{$(N * W)}(_materialize(data(u))),
$A(),
$S(),
$NT(),
StaticInt{$RS}()
)
end
end
align = A === True
alias = S === True
notmp = NT === True
should_transpose =
T !== Bit && should_transpose_memop(F, C, AU, AV, N, zero(UInt))
if (W == N) & ((sizeof(T) * W) == RS) & should_transpose
# should transpose means we'll transpose, but we'll only prefer it over the
# `shuffle_store_quote` implementation if W == N, and we're using the entire register.
# Otherwise, llvm's shuffling is probably more clever/efficient when the conditions for
# `shuffle_store_quote` actually hold.
return vstore_transpose_quote(
D,
AU,
F,
N,
AV,
W,
UX,
align,
alias,
notmp,
RS,
sizeof(T),
JULIA_TYPES[T],
zero(UInt),
false
)
end
maybeshufflequote = shuffle_store_quote(
T,
(D, C, B, AU, F, N, AV, W, X),
I,
align,
alias,
notmp,
RS,
false
)
maybeshufflequote === nothing || return maybeshufflequote
if should_transpose
vstore_transpose_quote(
D,
AU,
F,
N,
AV,
W,
UX,
align,
alias,
notmp,
RS,
sizeof(T),
JULIA_TYPES[T],
zero(UInt),
false
)
else
vstore_unroll_quote(
D,
AU,
F,
N,
AV,
W,
M,
UX,
false,
align,
alias,
notmp,
RS,
false
)
end
end
@generated function _vstore_unroll!(
sptr::AbstractStridedPointer{T,D,C,B},
vu::VecUnroll{Nm1,W,VUT,<:VecOrScalar},
u::Unroll{AU,F,N,AV,W,M,UX,I},
::A,
::S,
::NT,
::StaticInt{RS},
::Nothing
) where {
AU,
F,
N,
AV,
W,
M,
I<:IndexNoUnroll,
T,
D,
Nm1,
S<:StaticBool,
A<:StaticBool,
NT<:StaticBool,
RS,
C,
B,
UX,
VUT
}
VUT === T || return Expr(
:block,
Expr(:meta, :inline),
:(_vstore_unroll!(
sptr,
vconvert($T, vu),
u,
$(A()),
$(S()),
$(NT()),
$(StaticInt(RS)),
nothing
))
)
if (T === Bit) && (F == W < 8) && (UX == 1) && (AV == AU == C > 0)
return quote
$(Expr(:meta, :inline))
__vstore!(
pointer(sptr),
vu,
MM{$(N * W)}(_materialize(data(u))),
$A(),
$S(),
$NT(),
StaticInt{$RS}()
)
end
end
align = A === True
alias = S === True
notmp = NT === True
N == Nm1 + 1 || throw(
ArgumentError(
"The unrolled index specifies unrolling by $N, but sored `VecUnroll` is unrolled by $(Nm1+1)."
)
)
if T !== Bit && should_transpose_memop(F, C, AU, AV, N, zero(UInt))
vstore_transpose_quote(
D,
AU,
F,
N,
AV,
W,
UX,
align,
alias,
notmp,
RS,
sizeof(T),
JULIA_TYPES[T],
zero(UInt),
false
)
else
vstore_unroll_quote(
D,
AU,
F,
N,
AV,
W,
M,
UX,
false,
align,
alias,
notmp,
RS,
false
)
end
end
@generated function flattenmask(
m::AbstractMask{W},
::Val{M},
::StaticInt{N}
) where {W,N,M}
WN = W * N
MT = mask_type(WN)
MTS = mask_type_symbol(WN)
q = Expr(
:block,
Expr(:meta, :inline),
:(u = zero($MTS)),
:(mu = data(m)),
:(mf = (one($MTS) << $W) - one($MTS))
)
M = (bitreverse(M) >>> (8sizeof(M) - N))
n = N
while true
push!(q.args, :(u |= $(M % Bool ? :mu : :mf)))
(n -= 1) == 0 && break
push!(q.args, :(u <<= $(MT(W))))
M >>= 1
end
push!(q.args, :(Mask{$WN}(u)))
q
end
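# Worked example (editor's illustration): with `W = 4`, `N = 2`, and `M = 0b10`
# (only the second unrolled store masked), the first store's lanes stay
# all-true while `m` is spliced into the second store's lanes:
#   flattenmask(Mask{4}(0x3), Val(0b10), StaticInt(2))  # Mask{8}(0b00111111)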
@generated function flattenmask(
vm::VecUnroll{Nm1,W,Bit},
::Val{M}
) where {W,Nm1,M}
N = Nm1 + 1
WN = W * N
MT = mask_type(WN)
MTS = mask_type_symbol(WN)
q = Expr(
:block,
Expr(:meta, :inline),
:(u = zero($MTS)),
:(mu = data(vm)),
:(mf = (one($MTS) << $W) - one($MTS))
)
M = (bitreverse(M) >>> (8sizeof(M) - N))
n = 0
while true
n += 1
if M % Bool
      # the value OR'd at iteration `n` ends up in the lanes of unrolled op
      # `N - n + 1`, so pair it with that op's mask (fixes a mismatched index)
      push!(q.args, :(u |= data(getfield(mu, $(N - n + 1)))))
else
push!(q.args, :(u |= mf))
end
n == N && break
push!(q.args, :(u <<= $(MT(W))))
M >>= 1
end
push!(q.args, :(Mask{$WN}(u)))
q
end
@generated function _vstore_unroll!(
sptr::AbstractStridedPointer{T,D,C,B},
vu::VecUnroll{Nm1,W,VUT,VUV},
u::Unroll{AU,F,N,AV,W,M,UX,I},
sm::EVLMask{W},
::A,
::S,
::NT,
::StaticInt{RS},
::StaticInt{X}
) where {
AU,
F,
N,
AV,
W,
M,
I<:IndexNoUnroll,
T,
D,
Nm1,
S<:StaticBool,
A<:StaticBool,
NT<:StaticBool,
RS,
UX,
VUT,
VUV<:VecOrScalar,
X,
B,
C
}
N == Nm1 + 1 || throw(
ArgumentError(
"The unrolled index specifies unrolling by $N, but sored `VecUnroll` is unrolled by $(Nm1+1)."
)
)
VUT === T || return Expr(
:block,
Expr(:meta, :inline),
:(_vstore_unroll!(
sptr,
vconvert($T, vu),
u,
sm,
$(A()),
$(S()),
$(NT()),
$(StaticInt(RS)),
$(StaticInt(X))
))
)
if (T === Bit) && (F == W < 8) && (UX == 1) && (AV == AU == C > 0)
return quote
$(Expr(:meta, :inline))
msk = flattenmask(sm, Val{$M}(), StaticInt{$N}())
__vstore!(
pointer(sptr),
vu,
MM{$(N * W)}(_materialize(data(u))),
msk,
$A(),
$S(),
$NT(),
StaticInt{$RS}()
)
end
end
align = A === True
alias = S === True
notmp = NT === True
should_transpose = T !== Bit && should_transpose_memop(F, C, AU, AV, N, M)
if (W == N) & ((sizeof(T) * W) == RS) & should_transpose
    return vstore_transpose_quote(
D,
AU,
F,
N,
AV,
W,
UX,
align,
alias,
notmp,
RS,
sizeof(T),
JULIA_TYPES[T],
M,
true
)
end
# maybeshufflequote = shuffle_store_quote(T,(D,C,B,AU,F,N,AV,W,X), I, align, alias, notmp, RS, true)
# maybeshufflequote === nothing || return maybeshufflequote
if should_transpose
vstore_transpose_quote(
D,
AU,
F,
N,
AV,
W,
UX,
align,
alias,
notmp,
RS,
sizeof(T),
JULIA_TYPES[T],
M,
true
)
else
vstore_unroll_quote(
D,
AU,
F,
N,
AV,
W,
M,
UX,
true,
align,
alias,
notmp,
RS,
false
)
end
end
@generated function _vstore_unroll!(
sptr::AbstractStridedPointer{T,D,C},
vu::VecUnroll{Nm1,W,VUT,VUV},
u::Unroll{AU,F,N,AV,W,M,UX,I},
sm::AbstractMask{W},
::A,
::S,
::NT,
::StaticInt{RS},
::Any
) where {
AU,
F,
N,
AV,
W,
M,
I<:IndexNoUnroll,
T,
D,
Nm1,
S<:StaticBool,
A<:StaticBool,
NT<:StaticBool,
RS,
UX,
VUT,
VUV<:VecOrScalar,
C
}
N == Nm1 + 1 || throw(
ArgumentError(
"The unrolled index specifies unrolling by $N, but sored `VecUnroll` is unrolled by $(Nm1+1)."
)
)
VUT === T || return Expr(
:block,
Expr(:meta, :inline),
:(_vstore_unroll!(
sptr,
vconvert($T, vu),
u,
sm,
$(A()),
$(S()),
$(NT()),
$(StaticInt(RS)),
nothing
))
)
if (T === Bit) && (F == W < 8) && (UX == 1) && (AV == AU == C > 0)
return quote
$(Expr(:meta, :inline))
msk = flattenmask(sm, Val{$M}(), StaticInt{$N}())
__vstore!(
pointer(sptr),
vu,
MM{$(N * W)}(_materialize(data(u))),
msk,
$A(),
$S(),
$NT(),
StaticInt{$RS}()
)
end
end
align = A === True
alias = S === True
notmp = NT === True
if T !== Bit && should_transpose_memop(F, C, AU, AV, N, M)
vstore_transpose_quote(
D,
AU,
F,
N,
AV,
W,
UX,
align,
alias,
notmp,
RS,
sizeof(T),
JULIA_TYPES[T],
M,
sm <: EVLMask
)
else
vstore_unroll_quote(
D,
AU,
F,
N,
AV,
W,
M,
UX,
true,
A === True,
S === True,
NT === True,
RS,
false
)
end
end
# TODO: add `m::VecUnroll{Nm1,W,Bool}`
@generated function _vstore_unroll!(
sptr::AbstractStridedPointer{T,D},
vu::VecUnroll{Nm1,W,VUT,VUV},
u::Unroll{AU,F,N,AV,W,M,UX,I},
vm::VecUnroll{Nm1,<:Any,B},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
AU,
F,
N,
AV,
W,
M,
I<:IndexNoUnroll,
T,
D,
Nm1,
S<:StaticBool,
A<:StaticBool,
NT<:StaticBool,
RS,
UX,
B<:Union{Bit,Bool},
VUT,
VUV<:VecOrScalar
}
N == Nm1 + 1 || throw(
ArgumentError(
"The unrolled index specifies unrolling by $N, but sored `VecUnroll` is unrolled by $(Nm1+1)."
)
)
VUT === T || return Expr(
:block,
Expr(:meta, :inline),
:(_vstore_unroll!(
sptr,
vconvert($T, vu),
u,
vm,
$(A()),
$(S()),
$(NT()),
$(StaticInt(RS))
))
)
# if (T === Bit) && (F == W < 8) && (UX == 1) && (AV == AU == C > 0)
# return quote
# $(Expr(:meta, :inline))
# msk = flattenmask(vm, Val{$M}())
# __vstore!(
# pointer(sptr),
# vu,
# MM{$(N * W)}(_materialize(data(u))),
# msk,
# $A(),
# $S(),
# $NT(),
# StaticInt{$RS}(),
# )
# end
# end
vstore_unroll_quote(
D,
AU,
F,
N,
AV,
W,
M,
UX,
true,
A === True,
S === True,
NT === True,
RS,
true
)
end
@inline function _vstore!(
ptr::AbstractStridedPointer,
vu::VecUnroll{Nm1,W},
u::Unroll{AU,F,N,AV,W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS,AU,F,N,AV,W,Nm1}
p, li = linear_index(ptr, u)
sptr = similar_no_offset(ptr, p)
_vstore_unroll!(
sptr,
vu,
li,
A(),
S(),
NT(),
StaticInt{RS}(),
staticunrolledvectorstride(static_strides(sptr), u)
)
end
@inline function _vstore!(
ptr::AbstractStridedPointer,
vu::VecUnroll{Nm1,W},
u::Unroll{AU,F,N,AV,W},
m::AbstractMask{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS,AU,F,N,AV,W,Nm1}
p, li = linear_index(ptr, u)
sptr = similar_no_offset(ptr, p)
_vstore_unroll!(
sptr,
vu,
li,
m,
A(),
S(),
NT(),
StaticInt{RS}(),
staticunrolledvectorstride(static_strides(sptr), u)
)
end
@inline function _vstore!(
ptr::AbstractStridedPointer,
vu::VecUnroll{Nm1,W},
u::Unroll{AU,F,N,AV,W},
m::VecUnroll{Nm1,<:Any,B},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
Nm1,
W,
B<:Union{Bool,Bit},
AU,
F,
N,
AV
}
p, li = linear_index(ptr, u)
sptr = similar_no_offset(ptr, p)
_vstore_unroll!(sptr, vu, li, m, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
ptr::AbstractStridedPointer,
vu::VecUnroll{Nm1,W},
u::Unroll{AU,F,N,AV,W},
m::Bool,
::A,
::S,
::NT,
::StaticInt{RS}
) where {A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS,AU,F,N,AV,W,Nm1}
m && _vstore!(ptr, vu, u, A(), S(), NT(), StaticInt{RS}())
nothing
end
@inline function _vstore!(
sptr::AbstractStridedPointer,
v::V,
u::Unroll{AU,F,N,AV,W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
W,
T,
V<:AbstractSIMDVector{W,T},
AU,
F,
N,
AV
}
_vstore!(
sptr,
vconvert(VecUnroll{Int(StaticInt{N}() - One()),W,T,Vec{W,T}}, v),
u,
A(),
S(),
NT(),
StaticInt{RS}()
)
end
@inline function _vstore!(
sptr::AbstractStridedPointer,
v::V,
u::Unroll{AU,F,N,AV,W},
m::Union{Bool,AbstractMask,VecUnroll},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
W,
T,
V<:AbstractSIMDVector{W,T},
AU,
F,
N,
AV
}
_vstore!(
sptr,
vconvert(VecUnroll{Int(StaticInt{N}() - One()),W,T,Vec{W,T}}, v),
u,
m,
A(),
S(),
NT(),
StaticInt{RS}()
)
end
@inline function _vstore!(
sptr::AbstractStridedPointer{T},
x::NativeTypes,
u::Unroll{AU,F,N,AV,W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
W,
T<:NativeTypes,
AU,
F,
N,
AV
}
_vstore!(
sptr,
vconvert(VecUnroll{Int(StaticInt{N}() - One()),W,T,Vec{W,T}}, x),
u,
A(),
S(),
NT(),
StaticInt{RS}()
)
end
@inline function _vstore!(
sptr::AbstractStridedPointer{T},
x::NativeTypes,
u::Unroll{AU,F,N,AV,W},
m::Union{Bool,AbstractMask,VecUnroll},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
W,
T<:NativeTypes,
AU,
F,
N,
AV
}
_vstore!(
sptr,
vconvert(VecUnroll{Int(StaticInt{N}() - One()),W,T,Vec{W,T}}, x),
u,
m,
A(),
S(),
NT(),
StaticInt{RS}()
)
end
@inline function _vstore!(
sptr::AbstractStridedPointer{T},
v::NativeTypes,
u::Unroll{AU,F,N,-1,1},
::A,
::S,
::NT,
::StaticInt{RS}
) where {A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS,T<:NativeTypes,AU,F,N}
_vstore!(
sptr,
vconvert(VecUnroll{Int(StaticInt{N}() - One()),1,T,T}, v),
u,
A(),
S(),
NT(),
StaticInt{RS}()
)
end
@inline function _vstore!(
sptr::AbstractStridedPointer{T},
v::NativeTypes,
u::Unroll{AU,F,N,-1,1},
m::Union{Bool,AbstractMask,VecUnroll},
::A,
::S,
::NT,
::StaticInt{RS}
) where {A<:StaticBool,S<:StaticBool,NT<:StaticBool,RS,T<:NativeTypes,AU,F,N}
_vstore!(
sptr,
vconvert(VecUnroll{Int(StaticInt{N}() - One()),1,T,T}, v),
u,
m,
A(),
S(),
NT(),
StaticInt{RS}()
)
end
@inline function _vstore!(
sptr::AbstractStridedPointer{T},
vs::VecUnroll{Nm1,1},
u::Unroll{AU,F,N,AV,W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
T<:NativeTypes,
AU,
F,
N,
Nm1,
W,
AV
}
vb = _vbroadcast(StaticInt{W}(), vs, StaticInt{RS}())
_vstore!(sptr, vb, u, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
ptr::AbstractStridedPointer{T},
vu::VecUnroll{Nm1,1},
u::Unroll{AU,F,N,AV,1},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
T<:NativeTypes,
AU,
F,
N,
Nm1,
AV
}
p, li = linear_index(ptr, u)
sptr = similar_no_offset(ptr, p)
_vstore_unroll!(
sptr,
vu,
li,
A(),
S(),
NT(),
StaticInt{RS}(),
staticunrolledvectorstride(static_strides(sptr), u)
)
end
for M ∈ [:Bool, :AbstractMask]
@eval begin
@inline function _vstore!(
sptr::AbstractStridedPointer{T},
vs::VecUnroll{Nm1,1,T,T},
u::Unroll{AU,F,N,AV,W},
m::$M,
::A,
::S,
::NT,
::StaticInt{RS}
) where {
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
T<:NativeTypes,
AU,
F,
N,
Nm1,
W,
AV
}
vb = _vbroadcast(StaticInt{W}(), vs, StaticInt{RS}())
_vstore!(sptr, vb, u, m, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
ptr::AbstractStridedPointer{T},
vu::VecUnroll{Nm1,1,T,T},
u::Unroll{AU,F,N,AV,1,M},
m::$M,
::A,
::S,
::NT,
::StaticInt{RS}
) where {
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
T<:NativeTypes,
AU,
F,
N,
Nm1,
AV,
M
}
p, li = linear_index(ptr, u)
sptr = similar_no_offset(ptr, p)
_vstore_unroll!(
sptr,
vu,
li,
m,
A(),
S(),
NT(),
StaticInt{RS}(),
staticunrolledvectorstride(static_strides(sptr), u)
)
end
end
end
@inline function _vstore!(
sptr::AbstractStridedPointer{T},
vs::VecUnroll{Nm1,1,T,T},
u::Unroll{AU,F,N,AV,W},
m::VecUnroll{Nm1,<:Any,<:Union{Bool,Bit}},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
T<:NativeTypes,
AU,
F,
N,
Nm1,
W,
AV
}
vb = _vbroadcast(StaticInt{W}(), vs, StaticInt{RS}())
_vstore!(sptr, vb, u, m, A(), S(), NT(), StaticInt{RS}())
end
@inline function _vstore!(
ptr::AbstractStridedPointer{T},
vu::VecUnroll{Nm1,1,T,T},
u::Unroll{AU,F,N,AV,1,M},
m::VecUnroll{Nm1,<:Any,<:Union{Bool,Bit}},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
T<:NativeTypes,
AU,
F,
N,
Nm1,
AV,
M
}
p, li = linear_index(ptr, u)
sptr = similar_no_offset(ptr, p)
_vstore_unroll!(sptr, vu, li, m, A(), S(), NT(), StaticInt{RS}())
end
function vload_double_unroll_quote(
D::Int,
NO::Int,
NI::Int,
AUO::Int,
FO::Int,
AV::Int,
W::Int,
MO::UInt,
X::Int,
C::Int,
AUI::Int,
FI::Int,
MI::UInt,
mask::Bool,
A::Bool,
RS::Int,
svus::Int
)
  # UO + 1 ≠ NO && throw(ArgumentError("Outer unroll being loaded is unrolled $(UO+1) times, but index indicates it was unrolled $NO times."))
  # UI + 1 ≠ NI && throw(ArgumentError("Inner unroll being loaded is unrolled $(UI+1) times, but index indicates it was unrolled $NI times."))
q = Expr(
:block,
Expr(:meta, :inline),
:(id = getfield(getfield(u, :i), :i)),
:(gptr = similar_no_offset(sptr, gep(pointer(sptr), id)))
)
aexpr = Expr(:call, A ? :True : :False)
rsexpr = Expr(:call, Expr(:curly, :StaticInt, RS))
if (AUO == C) & ((AV ≠ C) | ((AV == C) & (X == NO))) # outer unroll is along contiguous axis, so we swap outer and inner
# we loop over `UI+1`, constructing VecUnrolls "slices", and store those
unroll = :(Unroll{$AUO,$FO,$NO,$AV,$W,$MO,$X}(Zero()))
# tupvec = Vector{Expr}(undef, NI)
vds = Vector{Symbol}(undef, NI)
for ui ∈ 0:NI-1
if ui == 0
loadq = :(_vload_unroll(gptr, $unroll)) # VecUnroll($tup)
else
inds = sparse_index_tuple(D, AUI, ui * FI)
loadq = :(_vload_unroll(gesp(gptr, $inds), $unroll)) # VecUnroll($tup)
end
if mask & (MI % Bool)
push!(loadq.args, :m)
end
MI >>>= 1
push!(loadq.args, aexpr, rsexpr)
if svus == typemax(Int)
push!(loadq.args, nothing)
else
push!(loadq.args, :(StaticInt{$svus}()))
end
vds[ui+1] = vul = Symbol(:vul_, ui)
push!(q.args, Expr(:(=), vul, :(getfield($loadq, 1))))
end
otup = Expr(:tuple)
for t ∈ 1:NO # transpose them
tup = Expr(:tuple)
# tup = ui == 0 ? Expr(:tuple) : tupvec[ui+1]
for ui ∈ 1:NI
# push!(tup.args, :(getfield($(vds[t]), $(ui+1), false)))
push!(tup.args, :(getfield($(vds[ui]), $t, false)))
end
push!(otup.args, :(VecUnroll($tup)))
end
push!(q.args, :(VecUnroll($otup)))
else # we loop over `UO+1` and do the loads
unroll = :(Unroll{$AUI,$FI,$NI,$AV,$W,$MI,$X}(Zero()))
tup = Expr(:tuple)
for uo ∈ 0:NO-1
if uo == 0
loadq = :(_vload_unroll(gptr, $unroll))
else
inds = sparse_index_tuple(D, AUO, uo * FO)
loadq = :(_vload_unroll(gesp(gptr, $inds), $unroll))
end
if mask & (MO % Bool)
push!(loadq.args, :m)
end
MO >>>= 1
push!(loadq.args, aexpr, rsexpr)
if svus == typemax(Int)
push!(loadq.args, nothing)
else
push!(loadq.args, :(StaticInt{$svus}()))
end
push!(tup.args, loadq)
end
push!(q.args, :(VecUnroll($tup)))
end
return q
end
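# Editor's note: when the outer unroll axis is the contiguous one, this quote
# issues, for each of the `NI` inner steps, one load unrolled along the
# contiguous outer axis, then transposes the resulting `NO × NI`
# tuple-of-tuples; otherwise it loops over the outer unroll and delegates each
# slice to the single-`Unroll` loader above.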
@generated function _vload_unroll(
sptr::AbstractStridedPointer{T,D,C},
u::UU,
::A,
::StaticInt{RS},
::StaticInt{SVUS}
) where {T,A<:StaticBool,RS,D,C,SVUS,UU<:NestedUnroll}
AUO, FO, NO, AV, W, MO, X, U = unroll_params(UU)
AUI, FI, NI, AV, W, MI, X, I = unroll_params(U)
vload_double_unroll_quote(
D,
NO,
NI,
AUO,
FO,
AV,
W,
MO,
X,
C,
AUI,
FI,
MI,
false,
A === True,
RS,
SVUS
)
end
@generated function _vload_unroll(
sptr::AbstractStridedPointer{T,D,C},
u::UU,
::A,
::StaticInt{RS},
::Nothing
) where {T,A<:StaticBool,RS,D,C,UU<:NestedUnroll}
AUO, FO, NO, AV, W, MO, X, U = unroll_params(UU)
AUI, FI, NI, AV, W, MI, X, I = unroll_params(U)
vload_double_unroll_quote(
D,
NO,
NI,
AUO,
FO,
AV,
W,
MO,
X,
C,
AUI,
FI,
MI,
false,
A === True,
RS,
typemax(Int)
)
end
@generated function _vload_unroll(
sptr::AbstractStridedPointer{T,D,C},
u::UU,
m::AbstractMask{W},
::A,
::StaticInt{RS},
::StaticInt{SVUS}
) where {W,T,A<:StaticBool,RS,D,C,SVUS,UU<:NestedUnroll{W}}
AUO, FO, NO, AV, _W, MO, X, U = unroll_params(UU)
AUI, FI, NI, AV, _W, MI, X, I = unroll_params(U)
vload_double_unroll_quote(
D,
NO,
NI,
AUO,
FO,
AV,
W,
MO,
X,
C,
AUI,
FI,
MI,
true,
A === True,
RS,
SVUS
)
end
@generated function _vload_unroll(
sptr::AbstractStridedPointer{T,D,C},
u::UU,
m::AbstractMask{W},
::A,
::StaticInt{RS},
::Nothing
) where {W,T,A<:StaticBool,RS,D,C,UU<:NestedUnroll{W}}
AUO, FO, NO, AV, _W, MO, X, U = unroll_params(UU)
AUI, FI, NI, AV, _W, MI, X, I = unroll_params(U)
vload_double_unroll_quote(
D,
NO,
NI,
AUO,
FO,
AV,
W,
MO,
X,
C,
AUI,
FI,
MI,
true,
A === True,
RS,
typemax(Int)
)
end
# Unroll{AU,F,N,AV,W,M,X,I}
function vstore_double_unroll_quote(
D::Int,
NO::Int,
NI::Int,
AUO::Int,
FO::Int,
AV::Int,
W::Int,
MO::UInt,
X::Int,
C::Int,
AUI::Int,
FI::Int,
MI::UInt,
mask::Bool,
A::Bool,
S::Bool,
NT::Bool,
RS::Int,
svus::Int
)
  # UO + 1 ≠ NO && throw(ArgumentError("Outer unroll being stored is unrolled $(UO+1) times, but index indicates it was unrolled $NO times."))
  # UI + 1 ≠ NI && throw(ArgumentError("Inner unroll being stored is unrolled $(UI+1) times, but index indicates it was unrolled $NI times."))
q = Expr(
:block,
Expr(:meta, :inline),
:(vd = getfield(v, :data)),
:(id = getfield(getfield(u, :i), :i)),
:(gptr = similar_no_offset(sptr, gep(pointer(sptr), id)))
)
aexpr = Expr(:call, A ? :True : :False)
sexpr = Expr(:call, S ? :True : :False)
ntexpr = Expr(:call, NT ? :True : :False)
rsexpr = Expr(:call, Expr(:curly, :StaticInt, RS))
if (AUO == C) & ((AV ≠ C) | ((AV == C) & (X == NO))) # outer unroll is along contiguous axis, so we swap outer and inner
# so we loop over `UI+1`, constructing VecUnrolls "slices", and store those
unroll = :(Unroll{$AUO,$FO,$NO,$AV,$W,$MO,$X}(Zero()))
vds = Vector{Symbol}(undef, NO)
for t ∈ 1:NO
vds[t] = vdt = Symbol(:vd_, t)
push!(q.args, :($vdt = getfield(getfield(vd, $t, false), 1)))
end
# tupvec = Vector{Expr}(undef, NI)
for ui ∈ 0:NI-1
tup = Expr(:tuple)
# tup = ui == 0 ? Expr(:tuple) : tupvec[ui+1]
for t ∈ 1:NO
# push!(tup.args, :(getfield($(vds[t]), $(ui+1), false)))
push!(tup.args, :(getfield($(vds[t]), $(ui + 1), false)))
end
# tupvec[ui+1] = tup
if ui == 0
storeq = :(_vstore_unroll!(gptr, VecUnroll($tup), $unroll))
else
inds = sparse_index_tuple(D, AUI, ui * FI)
storeq = :(_vstore_unroll!(gesp(gptr, $inds), VecUnroll($tup), $unroll))
end
if mask & (MI % Bool)
push!(storeq.args, :m)
end
MI >>>= 1
push!(storeq.args, aexpr, sexpr, ntexpr, rsexpr)
if svus == typemax(Int)
push!(storeq.args, nothing)
else
push!(storeq.args, :(StaticInt{$svus}()))
end
push!(q.args, storeq)
end
else # we loop over `UO+1` and do the stores
unroll = :(Unroll{$AUI,$FI,$NI,$AV,$W,$MI,$X}(Zero()))
for uo ∈ 0:NO-1
if uo == 0
storeq = :(_vstore_unroll!(gptr, getfield(vd, 1, false), $unroll))
else
inds = sparse_index_tuple(D, AUO, uo * FO)
storeq = :(_vstore_unroll!(
gesp(gptr, $inds),
getfield(vd, $(uo + 1), false),
$unroll
))
end
if mask & (MO % Bool)
push!(storeq.args, :m)
end
MO >>>= 1
push!(storeq.args, aexpr, sexpr, ntexpr, rsexpr)
if svus == typemax(Int)
push!(storeq.args, nothing)
else
push!(storeq.args, :(StaticInt{$svus}()))
end
push!(q.args, storeq)
end
end
return q
end
@inline function _vstore_unroll!(
sptr::AbstractStridedPointer{T1,D,C},
v::VecUnroll{<:Any,W,T2,<:VecUnroll{<:Any,W,T2,Vec{W,T2}}},
u::UU,
::A,
::S,
::NT,
::StaticInt{RS},
::SVUS
) where {T1,D,C,W,T2,UU,A,S,NT,RS,SVUS}
_vstore_unroll!(
sptr,
vconvert(T1, v),
u,
A(),
S(),
NT(),
StaticInt{RS}(),
SVUS()
)
end
@inline function _vstore_unroll!(
sptr::AbstractStridedPointer{T1,D,C},
v::VecUnroll{<:Any,W,T2,<:VecUnroll{<:Any,W,T2,Vec{W,T2}}},
u::UU,
m::M,
::A,
::S,
::NT,
::StaticInt{RS},
::SVUS
) where {T1,D,C,W,T2,UU,A,S,NT,RS,SVUS,M}
_vstore_unroll!(
sptr,
vconvert(T1, v),
u,
m,
A(),
S(),
NT(),
StaticInt{RS}(),
SVUS()
)
end
@generated function _vstore_unroll!(
sptr::AbstractStridedPointer{T,D,C},
v::VecUnroll{<:Any,W,T,<:VecUnroll{<:Any,W,T,Vec{W,T}}},
u::UU,
::A,
::S,
::NT,
::StaticInt{RS},
::StaticInt{SVUS}
) where {
W,
T,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
D,
C,
SVUS,
UU<:NestedUnroll{W}
}
AUO, FO, NO, AV, _W, MO, X, U = unroll_params(UU)
AUI, FI, NI, AV, _W, MI, X, I = unroll_params(U)
vstore_double_unroll_quote(
D,
NO,
NI,
AUO,
FO,
AV,
W,
MO,
X,
C,
AUI,
FI,
MI,
false,
A === True,
S === True,
NT === True,
RS,
SVUS
)
end
@generated function _vstore_unroll!(
sptr::AbstractStridedPointer{T,D,C},
v::VecUnroll{<:Any,W,T,<:VecUnroll{<:Any,W,T,Vec{W,T}}},
u::UU,
::A,
::S,
::NT,
::StaticInt{RS},
::Nothing
) where {
W,
T,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
D,
C,
UU<:NestedUnroll{W}
}
AUO, FO, NO, AV, _W, MO, X, U = unroll_params(UU)
AUI, FI, NI, AV, _W, MI, X, I = unroll_params(U)
vstore_double_unroll_quote(
D,
NO,
NI,
AUO,
FO,
AV,
W,
MO,
X,
C,
AUI,
FI,
MI,
false,
A === True,
S === True,
NT === True,
RS,
typemax(Int)
)
end
@generated function _vstore_unroll!(
sptr::AbstractStridedPointer{T,D,C},
v::VecUnroll{<:Any,W,T,<:VecUnroll{<:Any,W,T,Vec{W,T}}},
u::UU,
m::AbstractMask{W},
::A,
::S,
::NT,
::StaticInt{RS},
::StaticInt{SVUS}
) where {
W,
T,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
D,
C,
SVUS,
UU<:NestedUnroll{W}
}
AUO, FO, NO, AV, _W, MO, X, U = unroll_params(UU)
AUI, FI, NI, AV, _W, MI, X, I = unroll_params(U)
vstore_double_unroll_quote(
D,
NO,
NI,
AUO,
FO,
AV,
W,
MO,
X,
C,
AUI,
FI,
MI,
true,
A === True,
S === True,
NT === True,
RS,
SVUS
)
end
@generated function _vstore_unroll!(
sptr::AbstractStridedPointer{T,D,C},
v::VecUnroll{<:Any,W,T,<:VecUnroll{<:Any,W,T,Vec{W,T}}},
u::UU,
m::AbstractMask{W},
::A,
::S,
::NT,
::StaticInt{RS},
::Nothing
) where {
W,
T,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
D,
C,
UU<:NestedUnroll{W}
}
AUO, FO, NO, AV, _W, MO, X, U = unroll_params(UU)
AUI, FI, NI, AV, _W, MI, X, I = unroll_params(U)
vstore_double_unroll_quote(
D,
NO,
NI,
AUO,
FO,
AV,
W,
MO,
X,
C,
AUI,
FI,
MI,
true,
A === True,
S === True,
NT === True,
RS,
typemax(Int)
)
end
function vstore_unroll_i_quote(Nm1, Wsplit, W, A, S, NT, rs::Int, mask::Bool)
N = Nm1 + 1
N * Wsplit == W || throw(
ArgumentError(
"Vector of length $W can't be split into $N pieces of size $Wsplit."
)
)
q =
Expr(:block, Expr(:meta, :inline), :(vt = data(v)), :(im = _materialize(i)))
if mask
let U = mask_type_symbol(Wsplit)
push!(
q.args,
:(mt = data(vconvert(VecUnroll{$Nm1,$Wsplit,Bit,Mask{$Wsplit,$U}}, m)))
)
end
end
j = 0
alignval = Expr(:call, A ? :True : :False)
aliasval = Expr(:call, S ? :True : :False)
notmpval = Expr(:call, NT ? :True : :False)
rsexpr = Expr(:call, Expr(:curly, :StaticInt, rs))
for n ∈ 1:N
shufflemask = Expr(:tuple)
for w ∈ 1:Wsplit
push!(shufflemask.args, j)
j += 1
end
ex = :(__vstore!(ptr, vt[$n], shufflevector(im, Val{$shufflemask}())))
mask &&
push!(ex.args, Expr(:call, GlobalRef(Core, :getfield), :mt, n, false))
push!(ex.args, alignval, aliasval, notmpval, rsexpr)
push!(q.args, ex)
end
q
end
@generated function __vstore!(
ptr::Ptr{T},
v::VecUnroll{Nm1,Wsplit},
i::VectorIndex{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {T,Nm1,Wsplit,W,S<:StaticBool,A<:StaticBool,NT<:StaticBool,RS}
vstore_unroll_i_quote(
Nm1,
Wsplit,
W,
A === True,
S === True,
NT === True,
RS,
false
)
end
@generated function __vstore!(
ptr::Ptr{T},
v::VecUnroll{Nm1,Wsplit},
i::VectorIndex{W},
m::AbstractMask{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {T,Nm1,Wsplit,W,S<:StaticBool,A<:StaticBool,NT<:StaticBool,RS}
vstore_unroll_i_quote(
Nm1,
Wsplit,
W,
A === True,
S === True,
NT === True,
RS,
true
)
end
function vstorebit_unroll_i_quote(
Nm1::Int,
Wsplit::Int,
W::Int,
A::Bool,
S::Bool,
NT::Bool,
rs::Int,
mask::Bool
)
N = Nm1 + 1
N * Wsplit == W || throw(
ArgumentError(
"Vector of length $W can't be split into $N pieces of size $Wsplit."
)
)
# W == 8 || throw(ArgumentError("There is only a need for splitting a mask of size 8, but the mask is of size $W."))
# q = Expr(:block, Expr(:meta, :inline), :(vt = data(v)), :(im = _materialize(i)), :(u = 0x00))
U = mask_type(W)
q = Expr(:block, Expr(:meta, :inline), :(vt = data(v)), :(u = zero($U)))
j = 0
gf = GlobalRef(Core, :getfield)
while true
push!(q.args, :(u |= data($(Expr(:call, gf, :vt, (N - j), false)))))
j += 1
j == N && break
push!(q.args, :(u <<= $Wsplit))
end
alignval = Expr(:call, A ? :True : :False)
  aliasval = Expr(:call, S ? :True : :False)
  notmpval = Expr(:call, NT ? :True : :False)
rsexpr = Expr(:call, Expr(:curly, :StaticInt, rs))
mask && push!(
q.args,
:(
u = bitselect(
data(m),
__vload(
Base.unsafe_convert(Ptr{$(mask_type_symbol(W))}, ptr),
(data(i) >> 3),
$alignval,
$rsexpr
),
u
)
)
)
call =
Expr(:call, :__vstore!, :(reinterpret(Ptr{$U}, ptr)), :u, :(data(i) >> 3))
push!(call.args, alignval, aliasval, notmpval, rsexpr)
push!(q.args, call)
q
end
@generated function __vstore!(
ptr::Ptr{Bit},
v::VecUnroll{Nm1,Wsplit,Bit,M},
i::MM{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
Nm1,
Wsplit,
W,
S<:StaticBool,
A<:StaticBool,
NT<:StaticBool,
RS,
M<:AbstractMask{Wsplit}
}
vstorebit_unroll_i_quote(
Nm1,
Wsplit,
W,
A === True,
S === True,
NT === True,
RS,
false
)
end
@generated function __vstore!(
ptr::Ptr{Bit},
v::VecUnroll{Nm1,Wsplit,Bit,M},
i::MM{W},
m::Mask{W},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
Nm1,
Wsplit,
W,
S<:StaticBool,
A<:StaticBool,
NT<:StaticBool,
RS,
M<:AbstractMask{Wsplit}
}
vstorebit_unroll_i_quote(
Nm1,
Wsplit,
W,
A === True,
S === True,
NT === True,
RS,
true
)
end
# If the unrolled store is masked, the masked axis is vectorized, so the
# reducing `::Function` must not be applied; forward to the plain masked store.
@generated function _vstore!(
::G,
ptr::AbstractStridedPointer{T,D,C},
vu::VecUnroll{U,W},
u::Unroll{AU,F,N,AV,W,M,X,I},
m,
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T,
D,
C,
U,
AU,
F,
N,
W,
M,
I,
AV,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
X,
G<:Function
}
N == U + 1 || throw(
ArgumentError(
"The unrolled index specifies unrolling by $N, but sored `VecUnroll` is unrolled by $(U+1)."
)
)
# mask means it isn't vectorized
AV > 0 || throw(ArgumentError("AV ≤ 0, but masking what, exactly?"))
Expr(
:block,
Expr(:meta, :inline),
:(_vstore!(ptr, vu, u, m, $(A()), $(S()), $(NT()), StaticInt{$RS}()))
)
end
function transposeshuffle(split, W, offset::Bool)
tup = Expr(:tuple)
w = 0
S = 1 << split
i = offset ? S : 0
while w < W
for s ∈ 0:S-1
push!(tup.args, w + s + i)
end
for s ∈ 0:S-1
# push!(tup.args, w + W + s)
push!(tup.args, w + W + s + i)
end
w += 2S
end
Expr(:call, Expr(:curly, :Val, tup))
end
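# Illustrative values: `transposeshuffle(0, 4, false)` builds `Val{(0, 4, 2, 6)}()` and
# `transposeshuffle(0, 4, true)` builds `Val{(1, 5, 3, 7)}()`; shuffling a vector pair with
# both and combining the results with an operator performs one step of a pairwise
# transpose-and-reduce tree (used by `horizontal_reduce_store_expr` below).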
function horizontal_reduce_store_expr(
W::Int,
Ntotal::Int,
(C, D, AU, F)::NTuple{4,Int},
op::Symbol,
reduct::Symbol,
noalias::Bool,
RS::Int,
mask::Bool
)
N = ((C == AU) && isone(F)) ? prevpow2(Ntotal) : 0
q = Expr(:block, Expr(:meta, :inline), :(v = data(vu)))
mask && push!(q.args, :(masktuple = data(m)))
# mask && push!(q.args, :(unsignedmask = data(tomask(m))))
# store = noalias ? :vnoaliasstore! : :vstore!
falseexpr = Expr(:call, :False)
aliasexpr = noalias ? Expr(:call, :True) : falseexpr
rsexpr = Expr(:call, Expr(:curly, :StaticInt, RS))
ispow2(W) ||
throw(ArgumentError("Horizontal store requires power-of-2 vector widths."))
gf = GlobalRef(Core, :getfield)
if N > 1
push!(q.args, :(gptr = gesp(ptr, $gf(u, :i))))
push!(q.args, :(bptr = pointer(gptr)))
extractblock = Expr(:block)
vectors = [Symbol(:v_, n) for n ∈ 0:N-1]
for n ∈ 1:N
push!(
extractblock.args,
Expr(:(=), vectors[n], Expr(:call, gf, :v, n, false))
)
end
push!(q.args, extractblock)
ncomp = 0
minWN = min(W, N)
while ncomp < N
Nt = minWN
Wt = W
splits = 0
while Nt > 1
Nt >>>= 1
shuffle0 = transposeshuffle(splits, Wt, false)
shuffle1 = transposeshuffle(splits, Wt, true)
splits += 1
for nh ∈ 1:Nt
n1 = 2nh
n0 = n1 - 1
v0 = vectors[n0+ncomp]
v1 = vectors[n1+ncomp]
vh = vectors[nh+ncomp]
# combine n0 and n1
push!(
q.args,
Expr(
:(=),
vh,
Expr(
:call,
op,
Expr(:call, :shufflevector, v0, v1, shuffle0),
Expr(:call, :shufflevector, v0, v1, shuffle1)
)
)
)
end
end
# v0 is now the only vector
v0 = vectors[ncomp+1]
while Wt > minWN
Wh = Wt >>> 1
v0new = Symbol(v0, Wt)
push!(
q.args,
Expr(
:(=),
v0new,
Expr(
:call,
op,
Expr(
:call,
:shufflevector,
v0,
Expr(
:call,
Expr(:curly, :Val, Expr(:tuple, [w for w ∈ 0:Wh-1]...))
)
),
Expr(
:call,
:shufflevector,
v0,
Expr(
:call,
Expr(:curly, :Val, Expr(:tuple, [w for w ∈ Wh:Wt-1]...))
)
)
)
)
)
v0 = v0new
Wt = Wh
end
if ncomp == 0
storeexpr = Expr(:call, :__vstore!, :bptr, v0)
else
storeexpr = Expr(:call, :_vstore!, :gptr, v0)
zeroexpr = Expr(:call, Expr(:curly, :StaticInt, 0))
ind = Expr(:tuple)
foreach(_ -> push!(ind.args, zeroexpr), 1:D)
ind.args[AU] = Expr(:call, Expr(:curly, :StaticInt, F * ncomp))
push!(storeexpr.args, ind)
end
if mask
boolmask = Expr(:call, :Vec)
for n ∈ ncomp+1:ncomp+minWN
push!(boolmask.args, Expr(:call, gf, :masktuple, n, false))
end
push!(storeexpr.args, Expr(:call, :tomask, boolmask))
end
# mask && push!(storeexpr.args, :(Mask{$minWN}(unsignedmask)))
push!(storeexpr.args, falseexpr, aliasexpr, falseexpr, rsexpr)
push!(q.args, storeexpr)
# mask && push!(q.args, :(unsignedmask >>>= $minWN))
ncomp += minWN
end
else
push!(q.args, :(gptr = gesp(ptr, $gf(u, :i))))
end
if N < Ntotal
zeroexpr = Expr(:call, Expr(:curly, :StaticInt, 0))
ind = Expr(:tuple)
foreach(_ -> push!(ind.args, zeroexpr), 1:D)
for n ∈ N+1:Ntotal
(n > N + 1) && (ind = copy(ind)) # copy to avoid overwriting old
ind.args[AU] = Expr(:call, Expr(:curly, :StaticInt, F * (n - 1)))
scalar = Expr(:call, reduct, Expr(:call, gf, :v, n, false))
storeexpr = Expr(
:call,
:_vstore!,
:gptr,
scalar,
ind,
falseexpr,
aliasexpr,
falseexpr,
rsexpr
)
if mask
push!(
q.args,
Expr(:&&, Expr(:call, gf, :masktuple, n, false), storeexpr)
)
else
push!(q.args, storeexpr)
end
end
end
q
end
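# Sketch of the emitted strategy: for the leading `N = prevpow2(Ntotal)` vectors, pairs are
# repeatedly shuffled (via `transposeshuffle`) and combined with `op` until each original
# vector contributes one scalar lane, and the resulting vector is stored contiguously with a
# single (optionally masked) store; leftover vectors `N+1:Ntotal` are each collapsed with
# the scalar `reduct` and stored one element at a time, guarded by their mask bit when
# masked.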
@inline function _vstore!(
::G,
ptr::AbstractStridedPointer{T,D,C},
vu::VecUnroll{U,W},
u::Unroll{AU,F,N,AV,W,M,X,I},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T,
D,
C,
U,
AU,
F,
N,
W,
M,
I,
G<:Function,
AV,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
X
}
_vstore!(ptr, vu, u, A(), S(), NT(), StaticInt{RS}())
end
# function _vstore!(
@generated function _vstore!(
::G,
ptr::AbstractStridedPointer{T,D,C},
vu::VecUnroll{U,W},
u::Unroll{AU,F,N,AV,1,M,X,I},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T,
D,
C,
U,
AU,
F,
N,
W,
M,
I,
G<:Function,
AV,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
X
}
N == U + 1 || throw(
ArgumentError(
"The unrolled index specifies unrolling by $N, but sored `VecUnroll` is unrolled by $(U+1)."
)
)
if (G === typeof(identity)) || (AV > 0) || (W == 1)
return Expr(
:block,
Expr(:meta, :inline),
:(_vstore!(ptr, vu, u, $A(), $S(), $NT(), StaticInt{$RS}()))
)
elseif G === typeof(vsum)
op = :+
reduct = :vsum
elseif G === typeof(vprod)
op = :*
reduct = :vprod
elseif G === typeof(vmaximum)
op = :max
reduct = :vmaximum
elseif G === typeof(vminimum)
op = :min
reduct = :vminimum
elseif G === typeof(vall)
op = :&
reduct = :vall
elseif G === typeof(vany)
op = :|
reduct = :vany
else
throw("Function $G not recognized.")
end
horizontal_reduce_store_expr(
W,
N,
(C, D, AU, F),
op,
reduct,
S === True,
RS,
false
)
end
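# Usage sketch (mirroring the pattern exercised in the test suite; `y`, `v1`, `v2`, `v3`
# are hypothetical): store the horizontal sum of each unrolled vector into consecutive
# elements of `y`, which routes through this method and `horizontal_reduce_store_expr`:
#
#   vstore!(vsum, stridedpointer(y), VecUnroll((v1, v2, v3)),
#           Unroll{1,1,3,0,1,zero(UInt)}((1,)))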
@generated function _vstore!(
::G,
ptr::AbstractStridedPointer{T,D,C},
vu::VecUnroll{U,W},
u::Unroll{AU,F,N,AV,1,M,X,I},
m::VecUnroll{U,1,Bool,Bool},
::A,
::S,
::NT,
::StaticInt{RS}
) where {
T,
D,
C,
U,
AU,
F,
N,
W,
M,
I,
G<:Function,
AV,
A<:StaticBool,
S<:StaticBool,
NT<:StaticBool,
RS,
X
}
N == U + 1 || throw(
ArgumentError(
"The unrolled index specifies unrolling by $N, but sored `VecUnroll` is unrolled by $(U+1)."
)
)
if (G === typeof(identity)) || (AV > 0) || (W == 1)
return Expr(
:block,
Expr(:meta, :inline),
:(_vstore!(ptr, vu, u, $A(), $S(), $NT(), StaticInt{$RS}()))
)
elseif G === typeof(vsum)
op = :+
reduct = :vsum
elseif G === typeof(vprod)
op = :*
reduct = :vprod
elseif G === typeof(vmaximum)
op = :max
reduct = :vmaximum
elseif G === typeof(vminimum)
op = :min
reduct = :vminimum
elseif G === typeof(vall)
op = :&
reduct = :vall
elseif G === typeof(vany)
op = :|
reduct = :vany
else
throw("Function $G not recognized.")
end
horizontal_reduce_store_expr(
W,
N,
(C, D, AU, F),
op,
reduct,
S === True,
RS,
true
)
end
function lazymulunroll_load_quote(M, O, N, maskall, masklast, align, rs)
t = Expr(:tuple)
alignval = Expr(:call, align ? :True : :False)
rsexpr = Expr(:call, Expr(:curly, :StaticInt, rs))
gf = GlobalRef(Core, :getfield)
for n = 1:N+1
ind = if (M != 1) | (O != 0)
:(LazyMulAdd{$M,$O}(u[$n]))
else
Expr(:call, gf, :u, n, false)
end
call = if maskall
Expr(
:call,
:__vload,
:ptr,
ind,
Expr(:call, gf, :mt, n, false),
alignval,
rsexpr
)
elseif masklast && n == N + 1
Expr(:call, :__vload, :ptr, ind, :m, alignval, rsexpr)
else
Expr(:call, :__vload, :ptr, ind, alignval, rsexpr)
end
push!(t.args, call)
end
q = Expr(:block, Expr(:meta, :inline), :(u = data(um)))
maskall && push!(q.args, :(mt = data(m)))
push!(q.args, Expr(:call, :VecUnroll, t))
q
end
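# Illustrative expansion (assuming `M = 2`, `O = 0`, `N = 1`, unmasked, unaligned; `RS` is
# the register-size argument): each sub-index is wrapped in `LazyMulAdd` (deferring the
# `2i + 0` index arithmetic to the addressing mode) and loaded separately:
#
#   u = data(um)
#   VecUnroll((
#     __vload(ptr, LazyMulAdd{2,0}(u[1]), False(), StaticInt{RS}()),
#     __vload(ptr, LazyMulAdd{2,0}(u[2]), False(), StaticInt{RS}())
#   ))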
@generated function __vload(
ptr::Ptr{T},
um::VecUnroll{N,W,I,V},
::A,
::StaticInt{RS}
) where {T,N,W,I,V,A<:StaticBool,RS}
lazymulunroll_load_quote(1, 0, N, false, false, A === True, RS)
end
@generated function __vload(
ptr::Ptr{T},
um::VecUnroll{N,W,I,V},
m::VecUnroll{N,W,Bit,M},
::A,
::StaticInt{RS}
) where {T,N,W,I,V,A<:StaticBool,U,RS,M<:AbstractMask{W,U}}
lazymulunroll_load_quote(1, 0, N, true, false, A === True, RS)
end
@generated function __vload(
ptr::Ptr{T},
um::VecUnroll{N,W1,I,V},
m::AbstractMask{W2,U},
::A,
::StaticInt{RS}
) where {T,N,W1,W2,I,V,A<:StaticBool,U,RS}
if W1 == W2
lazymulunroll_load_quote(1, 0, N, false, true, A === True, RS)
elseif W2 == (N + 1) * W1
quote
$(Expr(:meta, :inline))
__vload(
ptr,
um,
VecUnroll(
splitvectortotuple(StaticInt{$(N + 1)}(), StaticInt{$W1}(), m)
),
$A(),
StaticInt{$RS}()
)
end
else
throw(
ArgumentError(
"Trying to load using $(N+1) indices of length $W1, while applying a mask of length $W2."
)
)
end
end
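# Mask-width handling for unrolled loads: a mask matching the sub-vector width `W1` masks
# only the final sub-load (tail handling), while a mask spanning all `(N + 1) * W1` lanes is
# split by `splitvectortotuple` into one sub-mask per load; any other width is an error.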
@generated function __vload(
ptr::Ptr{T},
um::LazyMulAdd{M,O,VecUnroll{N,W,I,V}},
::A,
::StaticInt{RS}
) where {T,M,O,N,W,I,V,A<:StaticBool,RS}
lazymulunroll_load_quote(M, O, N, false, false, A === True, RS)
end
@generated function __vload(
ptr::Ptr{T},
um::LazyMulAdd{M,O,VecUnroll{N,W,I,V}},
m::VecUnroll{N,W,Bit,MSK},
::A,
::StaticInt{RS}
) where {T,M,O,N,W,I,V,A<:StaticBool,U,RS,MSK<:AbstractMask{W,U}}
lazymulunroll_load_quote(M, O, N, true, false, A === True, RS)
end
@generated function __vload(
ptr::Ptr{T},
um::LazyMulAdd{M,O,VecUnroll{N,W1,I,V}},
m::AbstractMask{W2},
::A,
::StaticInt{RS}
) where {T,M,O,N,W1,W2,I,V,A<:StaticBool,RS}
if W1 == W2
lazymulunroll_load_quote(M, O, N, false, true, A === True, RS)
elseif W1 * (N + 1) == W2
quote
$(Expr(:meta, :inline))
__vload(
ptr,
um,
VecUnroll(
splitvectortotuple(StaticInt{$(N + 1)}(), StaticInt{$W1}(), m)
),
$A(),
StaticInt{$RS}()
)
end
else
throw(
ArgumentError(
"Trying to load using $(N+1) indices of length $W1, while applying a mask of length $W2."
)
)
end
end
function lazymulunroll_store_quote(
M,
O,
N,
mask,
align,
noalias,
nontemporal,
rs
)
gf = GlobalRef(Core, :getfield)
q = Expr(
:block,
Expr(:meta, :inline),
:(u = $gf($gf(um, :data), :data)),
:(v = $gf($gf(vm, :data), :data))
)
alignval = Expr(:call, align ? :True : :False)
noaliasval = Expr(:call, noalias ? :True : :False)
nontemporalval = Expr(:call, nontemporal ? :True : :False)
rsexpr = Expr(:call, Expr(:curly, :StaticInt, rs))
for n = 1:N+1
push!(
q.args,
Expr(
:call,
:vstore!,
:ptr,
Expr(:call, gf, :v, n, false),
:(LazyMulAdd{$M,$O}(u[$n])),
alignval,
noaliasval,
nontemporalval,
rsexpr
)
)
end
q
end
@generated function vload(
r::FastRange{T},
i::Unroll{1,W,N,1,W,M,X,Tuple{I}}
) where {T,I,W,N,M,X}
q = quote
$(Expr(:meta, :inline))
s = vload(r, data(i))
step = getfield(r, :s)
mm = Vec(MM{$W,$X}(Zero())) * step
    v = Base.FastMath.add_fast(s, mm)
end
t = Expr(:tuple, :v)
for n ∈ 1:N-1
# push!(t.args, :(MM{$W,$W}(Base.FastMath.add_fast(s, $(T(n*W))))))
push!(
t.args,
:(Base.FastMath.add_fast(v, Base.FastMath.mul_fast($(T(n * W)), step)))
)
end
push!(q.args, :(VecUnroll($t)))
q
end
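# Sketch of the result: with `s` the loaded base value and `step = getfield(r, :s)`, the
# first vector is `s` plus `step` times the lane offsets `(0, X, 2X, …)`, and each of the
# `N - 1` following vectors adds a further `n * W * step`, materializing consecutive W-wide
# slices of the range without touching memory.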
@generated function vload(
r::FastRange{T},
i::Unroll{1,W,N,1,W,M,X,Tuple{I}},
m::AbstractMask{W}
) where {T,I,W,N,M,X}
q = quote
$(Expr(:meta, :inline))
s = vload(r, data(i))
step = getfield(r, :s)
mm = Vec(MM{$W,$X}(Zero())) * step
    v = Base.FastMath.add_fast(s, mm)
z = zero(v)
end
t = if M % Bool
Expr(:tuple, :(ifelse(m, v, z)))
else
Expr(:tuple, :v)
end
for n ∈ 1:N-1
M >>>= 1
if M % Bool
push!(
t.args,
:(ifelse(
m,
Base.FastMath.add_fast(v, Base.FastMath.mul_fast($(T(n * W)), step)),
z
))
)
else
push!(
t.args,
:(Base.FastMath.add_fast(v, Base.FastMath.mul_fast($(T(n * W)), step)))
)
end
end
push!(q.args, :(VecUnroll($t)))
q
end
@generated function vload(
r::FastRange{T},
i::Unroll{1,W,N,1,W,M,X,Tuple{I}},
m::VecUnroll{Nm1,W,B}
) where {T,I,W,N,M,X,Nm1,B<:Union{Bit,Bool}}
q = quote
$(Expr(:meta, :inline))
s = vload(r, data(i))
step = getfield(r, :s)
mm = Vec(MM{$W,$X}(Zero())) * step
    v = Base.FastMath.add_fast(s, mm)
    z = zero(v)
    mt = data(m)
  end
  t = Expr(:tuple, :(ifelse(getfield(mt, 1, false), v, z)))
for n ∈ 1:N-1
push!(
t.args,
:(ifelse(
        getfield(mt, $(n + 1), false),
Base.FastMath.add_fast(v, Base.FastMath.mul_fast($(T(n * W)), step)),
z
))
)
end
push!(q.args, :(VecUnroll($t)))
q
end
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
["MIT"] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 5302 | # The SLEEF.jl package is licensed under the MIT "Expat" License:
# > Copyright (c) 2016: Mustafa Mohamad and other contributors:
# >
# > https://github.com/musm/SLEEF.jl/graphs/contributors
# >
# > Permission is hereby granted, free of charge, to any person obtaining a copy
# > of this software and associated documentation files (the "Software"), to deal
# > in the Software without restriction, including without limitation the rights
# > to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# > copies of the Software, and to permit persons to whom the Software is
# > furnished to do so, subject to the following conditions:
# >
# > The above copyright notice and this permission notice shall be included in all
# > copies or substantial portions of the Software.
# >
# > THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# > IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# > FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# > AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# > LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# > OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# > SOFTWARE.
# >
# SLEEF.jl includes ported code from the following project
# - [SLEEF](https://github.com/shibatch/SLEEF) [public domain] Author Naoki Shibata
using Base.Math: significand_bits
# sign-bit tests used to distinguish ±0.0 (the argument is assumed to be zero)
isnzero(x::T) where {T<:AbstractFloat} = signbit(x)
ispzero(x::T) where {T<:AbstractFloat} = !signbit(x)
# function cmpdenorm(x::Tx, y::Ty) where {Tx <: AbstractFloat, Ty <: AbstractFloat}
# sizeof(Tx) < sizeof(Ty) ? y = Tx(y) : x = Ty(x) # cast larger type to smaller type
# (isnan(x) && isnan(y)) && return true
# (isnan(x) || isnan(y)) && return false
# (isinf(x) != isinf(y)) && return false
# (x == Tx(Inf) && y == Ty(Inf)) && return true
# (x == Tx(-Inf) && y == Ty(-Inf)) && return true
# if y == 0
# (ispzero(x) && ispzero(y)) && return true
# (isnzero(x) && isnzero(y)) && return true
# return false
# end
# (!isnan(x) && !isnan(y) && !isinf(x) && !isinf(y)) && return sign(x) == sign(y)
# return false
# end
# The following compares the ulp distance between x and y.
# The arguments are first promoted to a common type, then cast to `T` for comparison.
infh(::Type{Float64}) = 1e300
infh(::Type{Float32}) = 1e37
function countulp(::Type{T}, __x, __y) where {T}
_x, _y = promote(__x, __y)
x, y = convert(T, _x), convert(T, _y) # Cast to smaller type
iszero(y) && return iszero(x) ? zero(x) : T(1004)
ulpc = convert(T, abs(_x - _y) / ulp(y))
nanulp = VectorizationBase.ifelse(isnan(x) ⊻ isnan(y), T(10000), T(0))
infulp = VectorizationBase.ifelse(
(sign(x) == sign(y)) & (abs(y) > infh(T)),
T(0),
T(10001)
)
ulpc = VectorizationBase.ifelse(
isinf(x),
infulp,
VectorizationBase.ifelse(isfinite(y), ulpc, T(10003))
)
ulpc = VectorizationBase.ifelse(isnan(x) | isnan(y), nanulp, ulpc)
ulpc = VectorizationBase.ifelse(
iszero(y),
VectorizationBase.ifelse(iszero(x), T(0), T(10002)),
ulpc
)
return ulpc
end
DENORMAL_MIN(::Type{Float64}) = 2.0^-1074
DENORMAL_MIN(::Type{Float32}) = 2.0f0^-149
function ulp(
x::Union{<:VectorizationBase.AbstractSIMD{<:Any,T},T}
) where {T<:AbstractFloat}
e = exponent(x)
# ulpc = max(VectorizationBase.vscalef(T(1.0), e - significand_bits(T)), DENORMAL_MIN(T))
ulpc = max(ldexp(T(1.0), e - significand_bits(T)), DENORMAL_MIN(T))
ulpc = VectorizationBase.ifelse(x == T(0.0), DENORMAL_MIN(T), ulpc)
return ulpc
end
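# Illustrative values (Float64): `ulp(1.0) == eps(Float64) == 2.0^-52`, and
# `countulp(Float64, nextfloat(1.0), 1.0) == 1.0` — adjacent floats are one ulp apart.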
countulp(x::T, y::T) where {T<:AbstractFloat} = countulp(T, x, y)
countulp(
x::VectorizationBase.AbstractSIMD{W,T},
y::VectorizationBase.AbstractSIMD{W,T}
) where {W,T<:AbstractFloat} = countulp(T, x, y)
# test the accuracy of `f1` against the reference implementation `f2`
# `xx` is an array of input values (which may be tuples for multiple-argument functions)
# `tol` is the acceptable tolerance, in ulps, to test against
function test_acc(
f1,
f2,
T,
xx,
tol,
::StaticInt{W} = pick_vector_width(T);
debug = false,
tol_debug = 5
) where {W}
@testset "accuracy $(f1)" begin
reference = map(f2 ∘ big, xx)
comp = similar(xx)
i = 0
spc = VectorizationBase.zstridedpointer(comp)
spx = VectorizationBase.zstridedpointer(xx)
GC.@preserve xx comp begin
while i < length(xx)
vstore!(spc, f1(vload(spx, (MM{W}(i),))), (MM{W}(i),))
i += W
end
end
rmax = 0.0
rmean = 0.0
xmax = map(zero, first(xx))
for i ∈ eachindex(xx)
q = comp[i]
c = reference[i]
u = countulp(T, q, c)
rmax = max(rmax, u)
xmax = rmax == u ? xx[i] : xmax
rmean += u
      if xx[i] == 36.390244f0 # debug probe left in place for one specific problematic input
@show f1, q, f2, T(c), xx[i], T(c)
end
if debug && u > tol_debug
@show f1, q, f2, T(c), xx[i], T(c)
end
end
rmean = rmean / length(xx)
fmtxloc = isa(xmax, Tuple) ? join(xmax, ", ") : string(xmax)
println(
rpad(f1, 18, " "),
": max ",
rmax,
rpad(" at x = " * fmtxloc, 40, " "),
": mean ",
rmean
)
t = @test trunc(rmax; digits = 1) <= tol
end
end
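# Usage sketch (hypothetical call; any SIMD implementation, e.g. `VectorizationBase.vexp`,
# could stand in for `f1`): test against the `BigFloat` reference with a 3-ulp bound:
#
#   xx = randn(Float64, 4096)
#   test_acc(VectorizationBase.vexp, exp, Float64, xx, 3)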
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
["MIT"] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 92562 | import InteractiveUtils, Aqua, ArrayInterface
InteractiveUtils.versioninfo(stdout; verbose = true)
include("testsetup.jl")
@time @testset "VectorizationBase.jl" begin
# Write your own tests here.
# Aqua.test_all(VectorizationBase, ambiguities = VERSION < v"1.6-DEV")
println("Aqua.test_all")
t0 = time_ns()
deps_compat = VERSION <= v"1.8" || isempty(VERSION.prerelease)
Aqua.test_all(VectorizationBase; deps_compat = deps_compat)
println("Aqua took $((time_ns() - t0)*1e-9) seconds")
# @test isempty(detect_unbound_args(VectorizationBase))
# @test isempty(detect_ambiguities(VectorizationBase))
W = Int(@inferred(VectorizationBase.pick_vector_width(Float64)))
Sys.WORD_SIZE == 64 &&
@test @inferred(VectorizationBase.pick_integer(Val(W))) == (
VectorizationBase.register_size() ==
VectorizationBase.simd_integer_register_size() ? Int64 : Int32
)
@test first(A) === A[1]
@test W64S == W64
println("Struct-Wrapped Vec")
@time @testset "Struct-Wrapped Vec" begin
@test data(zero(Vec{W64,Float64})) ===
ntuple(VE ∘ zero ∘ float, Val(W64)) ===
data(Vec{W64,Float64}(0.0))
@test data(one(Vec{W64,Float64})) ===
ntuple(VE ∘ one ∘ float, Val(W64)) ===
data(Vec{W64,Float64}(1.0)) ===
data(data(Vec{W64,Float64}(1.0)))
v = Vec((VE(1.0), VE(2.0), VE(3.0), VE(4.0)))
@test v ===
Vec{4,Float64}(1, 2, 3, 4) ===
conj(v) ===
v' ===
Vec{4,Float64}(v)
@test length(v) == 4 == first(size(v))
@test eltype(v) == Float64
for i = 1:4
@test i == VectorizationBase.extractelement(v, i - 1)
# @test i === Vec{4,Int}(v)[i] # should use fptosi (ie, vconvert defined in SIMDPirates).
end
@test zero(v) === zero(typeof(v))
@test one(v) === one(typeof(v))
# @test Vec{W32,Float32}(one(Vec{W32,Float64})) === Vec(one(Vec{W32,Float32})) === one(Vec{W32,Float32}) # conversions should be tested in SIMDPirates
@test Vec{1,Int}(1) === 1
vu = Vec(collect(1.0:16.0)...) + 2
@test_throws ErrorException vu.data
@test vu(1, 1) === VectorizationBase.data(vu)[1](1)
@test vu(2, 1) === VectorizationBase.data(vu)[1](2)
@test vu(1, 2) === VectorizationBase.data(vu)[2](1)
@test vu(2, 2) === VectorizationBase.data(vu)[2](2)
if W64 == 8
@test VectorizationBase.data(vu)[1] ===
Vec(3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0)
@test VectorizationBase.data(vu)[2] ===
Vec(11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0)
elseif W64 == 4
@test VectorizationBase.data(vu)[1] === Vec(3.0, 4.0, 5.0, 6.0)
@test VectorizationBase.data(vu)[2] === Vec(7.0, 8.0, 9.0, 10.0)
@test VectorizationBase.data(vu)[3] === Vec(11.0, 12.0, 13.0, 14.0)
@test VectorizationBase.data(vu)[4] === Vec(15.0, 16.0, 17.0, 18.0)
@test Vec(1.2, 3.4, 3.4) === Vec(1.2, 3.4, 3.4, 0.0)
elseif W64 == 2
@test VectorizationBase.data(vu)[1] === Vec(3.0, 4.0)
@test VectorizationBase.data(vu)[2] === Vec(5.0, 6.0)
@test VectorizationBase.data(vu)[3] === Vec(7.0, 8.0)
@test VectorizationBase.data(vu)[4] === Vec(9.0, 10.0)
@test VectorizationBase.data(vu)[5] === Vec(11.0, 12.0)
@test VectorizationBase.data(vu)[6] === Vec(13.0, 14.0)
@test VectorizationBase.data(vu)[7] === Vec(15.0, 16.0)
@test VectorizationBase.data(vu)[8] === Vec(17.0, 18.0)
end
end
println("alignment.jl")
@time @testset "alignment.jl" begin
for i ∈ 1:VectorizationBase.register_size()
@test VectorizationBase.align(i) == VectorizationBase.register_size()
end
for i ∈
1+VectorizationBase.register_size():2VectorizationBase.register_size()
@test VectorizationBase.align(i) == 2VectorizationBase.register_size()
end
for i ∈
(1:VectorizationBase.register_size()) .+
9VectorizationBase.register_size()
@test VectorizationBase.align(i) == 10VectorizationBase.register_size()
end
for i ∈ 1:VectorizationBase.register_size()
@test VectorizationBase.align(reinterpret(Ptr{Cvoid}, i)) ==
reinterpret(Ptr{Cvoid}, Int(VectorizationBase.register_size()))
end
for i ∈
1+VectorizationBase.register_size():2VectorizationBase.register_size()
@test VectorizationBase.align(reinterpret(Ptr{Cvoid}, i)) ==
reinterpret(Ptr{Cvoid}, 2Int(VectorizationBase.register_size()))
end
for i ∈
(1:VectorizationBase.register_size()) .+
19VectorizationBase.register_size()
@test VectorizationBase.align(reinterpret(Ptr{Cvoid}, i)) ==
reinterpret(Ptr{Cvoid}, 20Int(VectorizationBase.register_size()))
end
for i ∈ 1:VectorizationBase.register_size()
@test VectorizationBase.align(i, W32) ==
VectorizationBase.align(i, Float32) ==
VectorizationBase.align(i, Int32) ==
W32 * cld(i, W32)
end
for i ∈
1+VectorizationBase.register_size():2VectorizationBase.register_size()
@test VectorizationBase.align(i, W32) ==
VectorizationBase.align(i, Float32) ==
VectorizationBase.align(i, Int32) ==
W32 * cld(i, W32)
end
for i ∈
(1:VectorizationBase.register_size()) .+
29VectorizationBase.register_size()
@test VectorizationBase.align(i, W32) ==
VectorizationBase.align(i, Float32) ==
VectorizationBase.align(i, Int32) ==
W32 * cld(i, W32)
end
for i ∈ 1:VectorizationBase.register_size()
@test VectorizationBase.align(i, W64) ==
VectorizationBase.align(i, Float64) ==
VectorizationBase.align(i, Int64) ==
W64 * cld(i, W64)
end
for i ∈
1+VectorizationBase.register_size():2VectorizationBase.register_size()
@test VectorizationBase.align(i, W64) ==
VectorizationBase.align(i, Float64) ==
VectorizationBase.align(i, Int64) ==
W64 * cld(i, W64)
end
for i ∈
(1:VectorizationBase.register_size()) .+
29VectorizationBase.register_size()
@test VectorizationBase.align(i, W64) ==
VectorizationBase.align(i, Float64) ==
VectorizationBase.align(i, Int64) ==
W64 * cld(i, W64)
end
@test reinterpret(Int, VectorizationBase.align(pointer(A))) %
VectorizationBase.register_size() === 0
for i ∈ 0:VectorizationBase.register_size()-1
@test VectorizationBase.aligntrunc(i) == 0
end
for i ∈
VectorizationBase.register_size():2VectorizationBase.register_size()-1
@test VectorizationBase.aligntrunc(i) == VectorizationBase.register_size()
end
for i ∈
(0:VectorizationBase.register_size()-1) .+
9VectorizationBase.register_size()
@test VectorizationBase.aligntrunc(i) ==
9VectorizationBase.register_size()
end
for i ∈ 1:VectorizationBase.register_size()
@test VectorizationBase.aligntrunc(i, W32) ==
VectorizationBase.aligntrunc(i, Float32) ==
VectorizationBase.aligntrunc(i, Int32) ==
W32 * div(i, W32)
end
for i ∈
1+VectorizationBase.register_size():2VectorizationBase.register_size()
@test VectorizationBase.aligntrunc(i, W32) ==
VectorizationBase.aligntrunc(i, Float32) ==
VectorizationBase.aligntrunc(i, Int32) ==
W32 * div(i, W32)
end
for i ∈
(1:VectorizationBase.register_size()) .+
29VectorizationBase.register_size()
@test VectorizationBase.aligntrunc(i, W32) ==
VectorizationBase.aligntrunc(i, Float32) ==
VectorizationBase.aligntrunc(i, Int32) ==
W32 * div(i, W32)
end
for i ∈ 1:VectorizationBase.register_size()
@test VectorizationBase.aligntrunc(i, W64) ==
VectorizationBase.aligntrunc(i, Float64) ==
VectorizationBase.aligntrunc(i, Int64) ==
W64 * div(i, W64)
end
for i ∈
1+VectorizationBase.register_size():2VectorizationBase.register_size()
@test VectorizationBase.aligntrunc(i, W64) ==
VectorizationBase.aligntrunc(i, Float64) ==
VectorizationBase.aligntrunc(i, Int64) ==
W64 * div(i, W64)
end
for i ∈
(1:VectorizationBase.register_size()) .+
29VectorizationBase.register_size()
@test VectorizationBase.aligntrunc(i, W64) ==
VectorizationBase.aligntrunc(i, Float64) ==
VectorizationBase.aligntrunc(i, Int64) ==
W64 * div(i, W64)
end
a = Vector{Float64}(undef, 0)
ptr = pointer(a)
@test UInt(VectorizationBase.align(ptr, 1 << 12)) % (1 << 12) == 0
end
println("masks.jl")
@time @testset "masks.jl" begin
# @test Mask{8,UInt8}(0x0f) === @inferred Mask(0x0f)
# @test Mask{16,UInt16}(0x0f0f) === @inferred Mask(0x0f0f)
@test EVLMask{8,UInt8}(0xff, 8) === mask(Val(8), 0)
@test EVLMask{8,UInt8}(0xff, 8) === mask(Val(8), 8)
@test EVLMask{8,UInt8}(0xff, 8) === mask(Val(8), 16)
@test EVLMask{8,UInt8}(0xff, 8) ===
mask(Val(8), VectorizationBase.StaticInt(0))
@test EVLMask{16,UInt16}(0xffff, 16) === mask(Val(16), 0)
@test EVLMask{16,UInt16}(0xffff, 16) === mask(Val(16), 16)
@test EVLMask{16,UInt16}(0xffff, 16) === mask(Val(16), 32)
@test EVLMask{12,UInt16}(0x01ff, 9) === mask(Val(12), 117)
@test VectorizationBase.data(mask(Val(128), 253)) ==
0x1fffffffffffffffffffffffffffffff
@test mask(Val(128), 253) ===
EVLMask{128,UInt128}(0x1fffffffffffffffffffffffffffffff, 125)
@test EVLMask{1}(true, 1) === true
@test Mask{1}(true) === true
@test EVLMask{1}(false, 1) === false
@test Mask{1}(false) === false
@test all(w -> VectorizationBase.mask_type(w) === UInt8, 1:8)
@test all(w -> VectorizationBase.mask_type(w) === UInt16, 9:16)
@test all(w -> VectorizationBase.mask_type(w) === UInt32, 17:32)
@test all(w -> VectorizationBase.mask_type(w) === UInt64, 33:64)
@test all(w -> VectorizationBase.mask_type(w) === UInt128, 65:128)
if VectorizationBase.register_size() == 64 # avx512
# @test VectorizationBase.mask_type(Float16) === UInt32
@test VectorizationBase.mask_type(Float32) === UInt16
@test VectorizationBase.mask_type(Float64) === UInt8
# @test VectorizationBase.max_mask(Float16) === 0xffffffff # 32
@test data(VectorizationBase.max_mask(Float32)) === 0xffff # 16
@test data(VectorizationBase.max_mask(Float64)) === 0xff # 8
elseif VectorizationBase.register_size() == 32 # avx or avx2
# @test VectorizationBase.mask_type(Float16) === UInt16
@test VectorizationBase.mask_type(Float32) === UInt8
@test VectorizationBase.mask_type(Float64) === UInt8
# @test VectorizationBase.max_mask(Float16) === 0xffff # 16
@test data(VectorizationBase.max_mask(Float32)) === 0xff # 8
@test data(VectorizationBase.max_mask(Float64)) === 0x0f # 4
elseif VectorizationBase.register_size() == 16 # sse
# @test VectorizationBase.mask_type(Float16) === UInt8
@test VectorizationBase.mask_type(Float32) === UInt8
@test VectorizationBase.mask_type(Float64) === UInt8
# @test VectorizationBase.max_mask(Float16) === 0xff # 8
@test data(VectorizationBase.max_mask(Float32)) === 0x0f # 4
@test data(VectorizationBase.max_mask(Float64)) === 0x03 # 2
end
@test all(
w ->
bitstring(VectorizationBase.mask(Val(8), w)) ==
reduce(*, (8 - i < w ? "1" : "0" for i = 1:8)),
1:8
)
@test all(
w ->
bitstring(VectorizationBase.mask(Val(16), w)) ==
reduce(*, (16 - i < w ? "1" : "0" for i = 1:16)),
1:16
)
@test all(
w ->
VectorizationBase.mask(Float64, w) === VectorizationBase.mask(
@inferred(VectorizationBase.pick_vector_width(Float64)),
w
),
1:W64
)
@test VectorizationBase.vbroadcast(Val(8), true) === Mask{8}(0xff)
@test !VectorizationBase.vall(Mask{8}(0xfc))
@test !VectorizationBase.vall(Mask{4}(0xfc))
@test VectorizationBase.vall(EVLMask{8}(0xff, 8))
@test !VectorizationBase.vall(EVLMask{8}(0x1f, 5))
@test VectorizationBase.vall(Mask{4}(0xcf))
@test VectorizationBase.vany(Mask{8}(0xfc))
@test VectorizationBase.vany(Mask{4}(0xfc))
@test !VectorizationBase.vany(Mask{8}(0x00))
@test !VectorizationBase.vany(Mask{4}(0xf0))
@test VectorizationBase.vall(
Mask{8}(0xfc) + Mask{8}(0xcf) ==
Vec(0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x02, 0x02)
)
@test VectorizationBase.vall(
Mask{4}(0xfc) + Mask{4}(0xcf) == Vec(0x01, 0x01, 0x02, 0x02)
)
@test VectorizationBase.vall(
Mask{8}(0xcf) + EVLMask{8}(0x1f, 5) ==
Vec(0x02, 0x02, 0x02, 0x02, 0x01, 0x00, 0x01, 0x01)
)
@test VectorizationBase.vall(Mask{8}(0xec) != Mask{8}(0x13))
@test VectorizationBase.vall(
(!Mask{8}(0xac) & Mask{8}(0xac)) == Mask{8}(0x00)
)
@test !VectorizationBase.vany((!Mask{8}(0xac) & Mask{8}(0xac)))
@test VectorizationBase.vall(
(!Mask{8}(0xac) | Mask{8}(0xac)) == Mask{8}(0xff)
)
@test VectorizationBase.vall((!Mask{8}(0xac) | Mask{8}(0xac)))
@test VectorizationBase.vall(
VectorizationBase.splitint(0xb53a5d6426a9d29d, Int8) ==
Vec{8,Int8}(-99, -46, -87, 38, 100, 93, 58, -75)
)
# other splitint tests for completeness sake
@test VectorizationBase.splitint(0xb53a5d6426a9d29d, Int64) ===
0xb53a5d6426a9d29d
@test VectorizationBase.splitint(0xff, UInt16) === 0x00ff
@test !VectorizationBase.vany(
VectorizationBase.splitint(0x47766b9a9509d175acd77ff497236795, Int8) !=
Vec{16,Int8}(
-107,
103,
35,
-105,
-12,
127,
-41,
-84,
117,
-47,
9,
-107,
-102,
107,
118,
71
)
)
@test (EVLMask{8}(0x1f, 5) | EVLMask{8}(0x03, 3)) === EVLMask{8}(0x1f, 5)
@test (Mask{8}(0x1f) | EVLMask{8}(0x03, 3)) === Mask{8}(0x1f)
@test (EVLMask{8}(0x1f, 5) | Mask{8}(0x03)) === Mask{8}(0x1f)
@test (Mask{8}(0x1f) | Mask{8}(0x03)) === Mask{8}(0x1f)
@test (EVLMask{8}(0x1f, 5) & EVLMask{8}(0x03, 3)) === EVLMask{8}(0x03, 3)
@test (Mask{8}(0x1f) & EVLMask{8}(0x03, 3)) === Mask{8}(0x03)
@test (EVLMask{8}(0x1f, 5) & Mask{8}(0x03)) === Mask{8}(0x03)
@test (Mask{8}(0x1f) & Mask{8}(0x03)) === Mask{8}(0x03)
@test (Mask{8}(0xac) | false) === Mask{8}(0xac)
@test (Mask{8}(0xac) | true) === Mask{8}(0xff)
@test (false | Mask{8}(0xac)) === Mask{8}(0xac)
@test (true | Mask{8}(0xac)) === Mask{8}(0xff)
@test (Mask{8}(0xac) & false) === Mask{8}(0x00)
@test (Mask{8}(0xac) & true) === Mask{8}(0xac)
@test (false & Mask{8}(0xac)) === Mask{8}(0x00)
@test (true & Mask{8}(0xac)) === Mask{8}(0xac)
@test (Mask{8}(0xac) ⊻ false) === Mask{8}(0xac)
@test (Mask{8}(0xac) ⊻ true) === Mask{8}(0x53)
@test (false ⊻ Mask{8}(0xac)) === Mask{8}(0xac)
@test (true ⊻ Mask{8}(0xac)) === Mask{8}(0x53)
@test (Mask{4}(0x05) | true) === Mask{4}(0x0f)
@test (Mask{4}(0x05) | false) === Mask{4}(0x05)
@test (true | Mask{4}(0x05)) === Mask{4}(0x0f)
@test (false | Mask{4}(0x05)) === Mask{4}(0x05)
for T ∈ [UInt8, UInt16, UInt32]
Ws = T === UInt8 ? [2, 4, 8] : [8sizeof(T)]
for W ∈ Ws
u1 = rand(T)
u2 = rand(T)
m = ~(typemax(T) << W)
@test (Mask{W}(u1) & Mask{W}(u2)) === Mask{W}((u1 & u2) & m)
@test (Mask{W}(u1) | Mask{W}(u2)) === Mask{W}((u1 | u2) & m)
@test (Mask{W}(u1) ⊻ Mask{W}(u2)) === Mask{W}((u1 ⊻ u2) & m)
end
end
@test convert(Bool, Mask{8}(0xec)) ===
Vec(false, false, true, true, false, true, true, true) ===
convert(
Bool,
VectorizationBase.ifelse(
convert(Bool, Mask{8}(0xec)),
vbroadcast(Val(8), true),
vbroadcast(Val(8), false)
)
)
@test (MM{8}(2) ∈ 3:8) === Mask{8}(0x7e)
fbitvector1 = falses(20)
fbitvector2 = falses(20)
mu = VectorizationBase.VecUnroll((Mask{4}(0x0f), Mask{4}(0x0f)))
GC.@preserve fbitvector1 fbitvector2 begin
vstore!(
stridedpointer(fbitvector1),
mu,
(VectorizationBase.MM(StaticInt{8}(), 1),)
)
vstore!(
stridedpointer(fbitvector2),
mu,
(VectorizationBase.MM(StaticInt{8}(), 1),),
Mask{8}(0x7e)
)
vstore!(
stridedpointer(fbitvector1),
mu,
Unroll{1,4,2,1,4,zero(UInt),1}((9,))
)
vstore!(
stridedpointer(fbitvector2),
mu,
Unroll{1,4,2,1,4,2 % UInt,1}((9,)),
Mask{4}(0x03)
)
end
@test all(fbitvector1[1:16])
@test !any(fbitvector1[17:end])
@test !fbitvector2[1]
@test all(fbitvector2[2:7])
@test !fbitvector2[8]
@test all(fbitvector2[9:14])
@test !any(fbitvector2[15:end])
@test convert(Mask{4,UInt8}, true) === EVLMask{4}(0x0f, 4)
@test convert(Mask{4,UInt8}, false) === EVLMask{4}(0x00, 0)
@test convert(Mask{4}, true) === EVLMask{4}(0x0f, 4)
@test convert(Mask{4}, false) === EVLMask{4}(0x00, 0)
@test convert(Mask{16,UInt16}, true) === EVLMask{16}(0xffff, 16)
@test convert(Mask{16,UInt16}, false) === EVLMask{16}(0x0000, 0)
@test convert(Mask{16}, true) === EVLMask{16}(0xffff, 16)
@test convert(Mask{16}, false) === EVLMask{16}(0x0000, 0)
@test convert(EVLMask{4,UInt8}, true) === EVLMask{4}(0x0f, 4)
@test convert(EVLMask{4,UInt8}, false) === EVLMask{4}(0x00, 0)
@test convert(EVLMask{4}, true) === EVLMask{4}(0x0f, 4)
@test convert(EVLMask{4}, false) === EVLMask{4}(0x00, 0)
@test convert(EVLMask{16,UInt16}, true) === EVLMask{16}(0xffff, 16)
@test convert(EVLMask{16,UInt16}, false) === EVLMask{16}(0x0000, 0)
@test convert(EVLMask{16}, true) === EVLMask{16}(0xffff, 16)
@test convert(EVLMask{16}, false) === EVLMask{16}(0x0000, 0)
end
# @testset "number_vectors.jl" begin
# # eval(VectorizationBase.num_vector_load_expr(@__MODULE__, :(size(A)), 8)) # doesn't work?
# @test VectorizationBase.length_loads(A, Val(8)) == eval(VectorizationBase.num_vector_load_expr(@__MODULE__, :((() -> 13*17)()), 8)) == eval(VectorizationBase.num_vector_load_expr(@__MODULE__, 13*17, 8)) == divrem(length(A), 8)
# @test VectorizationBase.size_loads(A,1, Val(8)) == eval(VectorizationBase.num_vector_load_expr(@__MODULE__, :((() -> 13 )()), 8)) == eval(VectorizationBase.num_vector_load_expr(@__MODULE__, 13 , 8)) == divrem(size(A,1), 8)
# @test VectorizationBase.size_loads(A,2, Val(8)) == eval(VectorizationBase.num_vector_load_expr(@__MODULE__, :((() -> 17)()), 8)) == eval(VectorizationBase.num_vector_load_expr(@__MODULE__, 17, 8)) == divrem(size(A,2), 8)
# end
println("vector_width.jl")
@time @testset "vector_width.jl" begin
for T ∈ (Float32, Float64)
@test @inferred(VectorizationBase.pick_vector_width(T)) *
@inferred(VectorizationBase.static_sizeof(T)) ===
@inferred(VectorizationBase.register_size(T)) ===
@inferred(VectorizationBase.register_size())
end
for T ∈ (Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64)
@test @inferred(VectorizationBase.pick_vector_width(T)) *
@inferred(VectorizationBase.static_sizeof(T)) ===
@inferred(VectorizationBase.register_size(T)) ===
@inferred(VectorizationBase.simd_integer_register_size())
end
@test VectorizationBase.static_sizeof(BigFloat) ===
VectorizationBase.static_sizeof(Int)
@test VectorizationBase.static_sizeof(Float32) ===
VectorizationBase.static_sizeof(Int32) ===
VectorizationBase.StaticInt(4)
@test @inferred(VectorizationBase.pick_vector_width(Float16)) ===
@inferred(VectorizationBase.pick_vector_width(Float32))
@test @inferred(
VectorizationBase.pick_vector_width(
Float64,
Int32,
Float64,
Float32,
Float64
)
) * VectorizationBase.static_sizeof(Float64) ===
@inferred(VectorizationBase.register_size())
@test @inferred(VectorizationBase.pick_vector_width(Float64, Int32)) *
VectorizationBase.static_sizeof(Float64) ===
@inferred(VectorizationBase.register_size())
@test @inferred(VectorizationBase.pick_vector_width(Float32, Float32)) *
VectorizationBase.static_sizeof(Float32) ===
@inferred(VectorizationBase.register_size())
@test @inferred(VectorizationBase.pick_vector_width(Float32, Int32)) *
VectorizationBase.static_sizeof(Float32) ===
@inferred(VectorizationBase.simd_integer_register_size())
@test all(VectorizationBase._ispow2, 0:1)
@test all(
i ->
!any(VectorizationBase._ispow2, 1+(1<<(i-1)):(1<<i)-1) &&
VectorizationBase._ispow2(1 << i),
2:9
)
@test all(
i -> VectorizationBase.intlog2(1 << i) == i,
0:(Int == Int64 ? 53 : 30)
)
FTypes = (Float32, Float64)
Wv = ntuple(
i -> @inferred(VectorizationBase.register_size()) >> (i + 1),
Val(2)
)
for (T, N) in zip(FTypes, Wv)
W = @inferred(VectorizationBase.pick_vector_width(T))
# @test Vec{Int(W),T} == VectorizationBase.pick_vector(W, T) == VectorizationBase.pick_vector(T)
@test W == @inferred(VectorizationBase.pick_vector_width(W, T))
@test W ===
@inferred(VectorizationBase.pick_vector_width(W, T)) ==
@inferred(VectorizationBase.pick_vector_width(T))
while true
W >>= VectorizationBase.One()
W == 0 && break
W2, Wshift2 = @inferred(VectorizationBase.pick_vector_width_shift(W, T))
@test W2 ==
VectorizationBase.One() << Wshift2 ==
@inferred(VectorizationBase.pick_vector_width(W, T)) ==
VectorizationBase.pick_vector_width(Val(Int(W)), T) ==
W
@test StaticInt(W) ===
VectorizationBase.pick_vector_width(Val(Int(W)), T) ===
VectorizationBase.pick_vector_width(W, T)
for n = W+1:2W
W3, Wshift3 =
VectorizationBase.pick_vector_width_shift(StaticInt(n), T)
@test W2 << 1 ==
W3 ==
1 << (Wshift2 + 1) ==
1 << Wshift3 ==
VectorizationBase.pick_vector_width(StaticInt(n), T) ==
VectorizationBase.pick_vector_width(Val(n), T) ==
W << 1
# @test VectorizationBase.pick_vector(W, T) == VectorizationBase.pick_vector(W, T) == Vec{Int(W),T}
end
end
end
# @test VectorizationBase.nextpow2(0) == 1
@test all(i -> VectorizationBase.nextpow2(i) == i, 0:2)
for j = 1:10
l, u = (1 << j) + 1, 1 << (j + 1)
@test all(i -> VectorizationBase.nextpow2(i) == u, l:u)
end
end
println("Memory")
@time @testset "Memory" begin
C = rand(40, 20, 10) .> 0
mtest = vload(stridedpointer(C), ((MM{16})(9), 2, 3, 1))
@test VectorizationBase.offsetprecalc(stridedpointer(C), Val((5, 5))) ===
VectorizationBase.offsetprecalc(
VectorizationBase.offsetprecalc(stridedpointer(C), Val((5, 5))),
Val((3, 3))
)
@test VectorizationBase.bytestrides(
VectorizationBase.offsetprecalc(stridedpointer(C), Val((5, 5)))
) === VectorizationBase.bytestrides(stridedpointer(C))
@test VectorizationBase.bytestrides(C) ===
VectorizationBase.bytestrides(stridedpointer(C))
v1 = C[9:24, 2, 3]
@test tovector(mtest) == v1
@test [vload(stridedpointer(C), (1 + w, 2 + w, 3)) for w ∈ 1:W64] == getindex.(Ref(C), 1 .+ (1:W64), 2 .+ (1:W64), 3)
vstore!(stridedpointer(C), !mtest, ((MM{16})(17), 3, 4))
@test .!v1 ==
C[17:32, 3, 4] ==
tovector(vload(stridedpointer(C), ((MM{16})(17), 3, 4)))
dims = (41, 42, 43) .* 3
# dims = (41,42,43);
A = reshape(collect(Float64(0):Float64(prod(dims) - 1)), dims)
P = PermutedDimsArray(A, (3, 1, 2))
O = OffsetArray(P, (-4, -2, -3))
indices = (
StaticInt{1}(),
StaticInt{2}(),
2,
MM{W64}(2),
MM{W64,2}(3),
MM{W64,-1}(W64 + 2),
Vec(ntuple(i -> 2i + 1, Val(W64))...)#,
# VectorizationBase.LazyMulAdd{2,-1}(MM{W64}(3))#, VectorizationBase.LazyMulAdd{2,-2}(Vec(ntuple(i -> 2i + 1, Val(W64))...))
)
println("LazyMulAdd Loads/Stores")
@time @testset "LazyMulAdd Loads/Stores" begin
max_const = 2
for _i ∈ indices,
_j ∈ indices,
_k ∈ indices,
im ∈ 1:max_const,
jm ∈ 1:max_const,
km ∈ 1:max_const,
B ∈ (A, P, O)
i = @inferred(VectorizationBase.lazymul(StaticInt(im), _i))
j = @inferred(VectorizationBase.lazymul(StaticInt(jm), _j))
k = @inferred(VectorizationBase.lazymul(StaticInt(km), _k))
iv = tovector(i)
jv = tovector(j)
kv = tovector(k)
if B === C
off = 9 - iv[1] % 8
iv += off
i += off
end
# @show typeof(B), i, j, k (im, _i), (jm, _j), (km, _k)
x = getindex.(Ref(B), iv, jv, kv)
GC.@preserve B begin
# @show i,j,k, typeof(B)
v = @inferred(vload(stridedpointer(B), (i, j, k)))
end
@test x == tovector(v)
if length(x) > 1
m = Mask{W64}(rand(UInt8))
mv = tovector(m)
x .*= mv
GC.@preserve B begin
v = @inferred(vload(stridedpointer(B), (i, j, k), m))
end
@test x == tovector(v)
end
for store! ∈ (vstore!, VectorizationBase.vnoaliasstore!)
y = isone(length(x)) ? randn() : randnvec(length(x))
GC.@preserve B store!(stridedpointer(B), y, (i, j, k))
x = getindex.(Ref(B), iv, jv, kv)
# @show i, j, k typeof.((i, j, k)), store!, typeof(B) y
@test x == tovector(y)
if length(x) > 1
z = Vec(ntuple(_ -> Core.VecElement(randn()), length(x)))
GC.@preserve B store!(stridedpointer(B), z, (i, j, k), m)
y = getindex.(Ref(B), iv, jv, kv)
@test y == ifelse.(mv, tovector(z), x)
end
end
end
end
println("VecUnroll Loads/Stores")
@time @testset "VecUnroll Loads/Stores" begin
for AU ∈ 1:3,
B ∈ (A, P, O),
i ∈ (StaticInt(1), 2, StaticInt(2)),
j ∈ (StaticInt(1), 3, StaticInt(3)),
k ∈ (StaticInt(1), 4, StaticInt(4))
# @show AU, typeof(B), i, j, k
for AV ∈ 1:3
v1 = randnvec()
v2 = randnvec()
v3 = randnvec()
GC.@preserve B begin
if AU == AV
vstore!(
VectorizationBase.offsetprecalc(
stridedpointer(B),
Val((5, 5, 5))
),
VectorizationBase.VecUnroll((v1, v2, v3)),
VectorizationBase.Unroll{AU,W64,3,AV,W64,zero(UInt)}((i, j, k))
)
vu = @inferred(
vload(
stridedpointer(B),
VectorizationBase.Unroll{AU,W64,3,AV,W64,zero(UInt)}((
i,
j,
k
))
)
)
else
vstore!(
stridedpointer(B),
VectorizationBase.VecUnroll((v1, v2, v3)),
VectorizationBase.Unroll{AU,1,3,AV,W64,zero(UInt)}((i, j, k))
)
vu = @inferred(
vload(
stridedpointer(B),
VectorizationBase.Unroll{AU,1,3,AV,W64,zero(UInt)}((i, j, k))
)
)
end
end
@test v1 === VectorizationBase.data(vu)[1]
@test v2 === VectorizationBase.data(vu)[2]
@test v3 === VectorizationBase.data(vu)[3]
ir = 0:(AV == 1 ? W64 - 1 : 0)
jr = 0:(AV == 2 ? W64 - 1 : 0)
kr = 0:(AV == 3 ? W64 - 1 : 0)
x1 = getindex.(Ref(B), i .+ ir, j .+ jr, k .+ kr)
if AU == 1
ir = ir .+ length(ir)
elseif AU == 2
jr = jr .+ length(jr)
elseif AU == 3
kr = kr .+ length(kr)
end
x2 = getindex.(Ref(B), i .+ ir, j .+ jr, k .+ kr)
if AU == 1
ir = ir .+ length(ir)
elseif AU == 2
jr = jr .+ length(jr)
elseif AU == 3
kr = kr .+ length(kr)
end
x3 = getindex.(Ref(B), i .+ ir, j .+ jr, k .+ kr)
@test x1 == tovector(VectorizationBase.data(vu)[1])
@test x2 == tovector(VectorizationBase.data(vu)[2])
@test x3 == tovector(VectorizationBase.data(vu)[3])
end
v1 = randnvec()
v2 = randnvec()
v3 = randnvec()
v4 = randnvec()
v5 = randnvec()
GC.@preserve B begin
vstore!(
VectorizationBase.vsum,
stridedpointer(B),
VectorizationBase.VecUnroll((v1, v2, v3, v4, v5)),
VectorizationBase.Unroll{AU,1,5,0,1,zero(UInt)}((i, j, k))
)
end
ir = 0:(AU == 1 ? 4 : 0)
jr = 0:(AU == 2 ? 4 : 0)
kr = 0:(AU == 3 ? 4 : 0)
xvs = getindex.(Ref(B), i .+ ir, j .+ jr, k .+ kr)
@test xvs ≈ map(VectorizationBase.vsum, [v1, v2, v3, v4, v5])
end
end
x = Vector{Int}(undef, 100)
i = MM{1}(0)
for j ∈ 1:25
VectorizationBase.__vstore!(
pointer(x),
j,
(i * VectorizationBase.static_sizeof(Int)),
VectorizationBase.False(),
VectorizationBase.False(),
VectorizationBase.False(),
VectorizationBase.register_size()
)
i += 1
end
for j ∈ 26:50
VectorizationBase.__vstore!(
pointer(x),
j,
(VectorizationBase.static_sizeof(Int) * i),
Mask{1}(0xff),
VectorizationBase.False(),
VectorizationBase.False(),
VectorizationBase.False(),
VectorizationBase.register_size()
)
i += 1
end
for j ∈ 51:75
VectorizationBase.__vstore!(
pointer(x),
j,
VectorizationBase.lazymul(i, VectorizationBase.static_sizeof(Int)),
VectorizationBase.False(),
VectorizationBase.False(),
VectorizationBase.False(),
VectorizationBase.register_size()
)
i += 1
end
for j ∈ 76:100
VectorizationBase.__vstore!(
pointer(x),
j,
VectorizationBase.lazymul(VectorizationBase.static_sizeof(Int), i),
Mask{1}(0xff),
VectorizationBase.False(),
VectorizationBase.False(),
VectorizationBase.False(),
VectorizationBase.register_size()
)
i += 1
end
@test x == 1:100
let ind4 =
VectorizationBase.Unroll{1,Int(W64),4,1,Int(W64),zero(UInt)}((1,)),
xf64 = rand(100),
xf16 = rand(Float16, 32)
# indu = VectorizationBase.VecUnroll((MM{W64}(1,), MM{W64}(1+W64,), MM{W64}(1+2W64,), MM{W64}(1+3W64,)))
GC.@preserve xf64 begin
vxtu = @inferred(vload(stridedpointer(xf64), ind4))
@test vxtu isa VectorizationBase.VecUnroll{
3,
Int(W64),
Float64,
Vec{Int(W64),Float64}
}
vxtutv = tovector(vxtu)
vxtutvmult = 3.5 .* vxtutv
@inferred(vstore!(stridedpointer(xf64), 3.5 * vxtu, ind4))
@test tovector(@inferred(vload(stridedpointer(xf64), ind4))) ==
vxtutvmult
mbig = Mask{4W64}(rand(UInt32)) # TODO: update if any arches support >512 bit vectors
mbigtv = tovector(mbig)
ubig = VectorizationBase.VecUnroll(
VectorizationBase.splitvectortotuple(StaticInt(4), W64S, mbig)
)
# @test tovector(@inferred(vload(stridedpointer(xf64), indu, ubig))) == ifelse.(mbigtv, vxtutvmult, 0.0)
@test tovector(@inferred(vload(stridedpointer(xf64), ind4, ubig))) ==
ifelse.(mbigtv, vxtutvmult, 0.0)
# @inferred(vstore!(stridedpointer(xf64), -11 * vxtu, indu, ubig));
# @test tovector(@inferred(vload(stridedpointer(xf64), ind4))) == ifelse.(mbigtv, -11 .* vxtutv, vxtutvmult)
@inferred(vstore!(stridedpointer(xf64), -77 * vxtu, ind4, ubig))
@test tovector(@inferred(vload(stridedpointer(xf64), ind4))) ==
ifelse.(mbigtv, -77 .* vxtutv, vxtutvmult)
vxf16 = @inferred(vload(stridedpointer(xf16), ind4))
@test vxf16 isa VectorizationBase.VecUnroll{
3,
Int(W64),
Float16,
Vec{Int(W64),Float16}
}
@test tovector(vxf16) == view(xf16, 1:(4*W64))
end
end
colors = [(R = rand(), G = rand(), B = rand()) for i ∈ 1:100]
colormat = reinterpret(reshape, Float64, colors)
sp = stridedpointer(colormat)
GC.@preserve colors begin
@test tovector(
@inferred(
vload(sp, VectorizationBase.Unroll{1,1,3,2,8,zero(UInt)}((1, 9)))
)
) == vec(colormat[:, 9:16]')
vu = @inferred(
vload(sp, VectorizationBase.Unroll{1,1,3,2,8,zero(UInt)}((1, 41)))
)
@inferred(
vstore!(sp, vu, VectorizationBase.Unroll{1,1,3,2,8,zero(UInt)}((1, 1)))
)
end
@test vec(colormat[:, 41:48]) == vec(colormat[:, 1:8])
end
println("Grouped Strided Pointers")
@time @testset "Grouped Strided Pointers" begin
M, K, N = 4, 5, 6
A = Matrix{Float64}(undef, M, K)
B = Matrix{Float64}(undef, K, N)
C = Matrix{Float64}(undef, M, N)
struct SizedWrapper{M,N,T,AT<:AbstractMatrix{T}} <: AbstractMatrix{T}
A::AT
end
ArrayInterface.is_forwarding_wrapper(::Type{<:SizedWrapper}) = true
SizedWrapper{M,N}(A::AT) where {M,N,T,AT<:AbstractMatrix{T}} =
SizedWrapper{M,N,T,AT}(A)
Base.size(::SizedWrapper{M,N}) where {M,N} = (M, N)
VectorizationBase.static_size(::SizedWrapper{M,N}) where {M,N} =
(StaticInt(M), StaticInt(N))
Base.getindex(A::SizedWrapper, i...) = getindex(parent(A), i...)
Base.parent(dw::SizedWrapper) = dw.A
VectorizationBase.ArrayInterface.parent_type(
::Type{SizedWrapper{M,N,T,AT}}
) where {M,N,T,AT} = AT
VectorizationBase.memory_reference(dw::SizedWrapper) =
VectorizationBase.memory_reference(parent(dw))
VectorizationBase.contiguous_axis(::Type{A}) where {A<:SizedWrapper} =
VectorizationBase.contiguous_axis(
VectorizationBase.ArrayInterface.parent_type(A)
)
VectorizationBase.contiguous_batch_size(dw::SizedWrapper) =
VectorizationBase.contiguous_batch_size(parent(dw))
VectorizationBase.stride_rank(::Type{A}) where {A<:SizedWrapper} =
VectorizationBase.stride_rank(
VectorizationBase.ArrayInterface.parent_type(A)
)
VectorizationBase.offsets(dw::SizedWrapper) =
VectorizationBase.offsets(parent(dw))
VectorizationBase.val_dense_dims(dw::SizedWrapper{T,N}) where {T,N} =
VectorizationBase.val_dense_dims(parent(dw))
VectorizationBase.ArrayInterface.is_forwarding_wrapper(
::Type{<:SizedWrapper}
) = true
function VectorizationBase.static_strides(
dw::SizedWrapper{M,N,T}
) where {M,N,T}
x1 = StaticInt(1)
if VectorizationBase.val_stride_rank(dw) === Val((1, 2))
return x1, x1 * StaticInt{M}()
else#if VectorizationBase.val_stride_rank(dw) === Val((2,1))
return x1 * StaticInt{N}(), x1
end
end
GC.@preserve A B C begin
fs = (false, true)#[identity, adjoint]
for ai ∈ fs, bi ∈ fs, ci ∈ fs
At = ai ? A : (similar(A')')
Bt = bi ? B : (similar(B')')
Ct = ci ? C : (similar(C')')
spdw = VectorizationBase.DensePointerWrapper{(true, true)}(
VectorizationBase.stridedpointer(At)
)
gsp, pres = @inferred(
VectorizationBase.grouped_strided_pointer(
(spdw, Bt, Ct),
Val{(((1, 1), (3, 1)), ((1, 2), (2, 1)), ((2, 2), (3, 2)))}()
)
)
if ai === ci
@test sizeof(gsp.strides) == 2sizeof(Int)
end
# Test to confirm that redundant strides are not stored in the grouped strided pointer
@test sizeof(gsp) ==
sizeof(Int) * (6 - (ai & ci) - ((!ai) & bi) - ((!bi) & (!ci)))
@test sizeof(gsp.offsets) == 0
pA, pB, pC = @inferred(VectorizationBase.stridedpointers(gsp))
@test pA === stridedpointer(At)
@test pB === stridedpointer(Bt)
@test pC === stridedpointer(Ct)
Btsw = SizedWrapper{K,N}(Bt)
gsp2, pres2 = @inferred(
VectorizationBase.grouped_strided_pointer(
(At, Btsw, Ct),
Val{(((1, 1), (3, 1)), ((1, 2), (2, 1)), ((2, 2), (3, 2)))}()
)
)
@test sizeof(gsp2) ==
sizeof(Int) * (5 - (ai & ci) - ((!ai) & bi) - ((!bi) & (!ci)))
pA2, pB2, pC2 = @inferred(VectorizationBase.stridedpointers(gsp2))
@test pointer(pA2) == pointer(At)
@test pointer(pB2) == pointer(Bt)
@test pointer(pC2) == pointer(Ct)
@test strides(pA2) == strides(pA)
@test strides(pB2) == strides(pB)
@test strides(pC2) == strides(pC)
end
end
data_in_large = Array{Float64}(undef, 4, 4, 4, 4, 1)
data_in = view(data_in_large, :, 1, :, :, 1)
tmp1 = Array{Float64}(undef, 4, 4, 4)
sp_data_in, sp_tmp1 = VectorizationBase.stridedpointers(
VectorizationBase.grouped_strided_pointer(
(data_in, tmp1),
Val((((1, 1), (2, 1)),))
)[1]
)
@test sp_data_in === stridedpointer(data_in)
@test sp_tmp1 === stridedpointer(tmp1)
end
println("Adjoint VecUnroll")
@time @testset "Adjoint VecUnroll" begin
W = W64
while W > 1
A = rand(W, W)
B = similar(A)
GC.@preserve A B begin
vut = @inferred(
vload(stridedpointer(A), VectorizationBase.Unroll{2,1,W,1,W}((1, 1)))
)
vu = @inferred(VectorizationBase.transpose_vecunroll(vut))
@test vu === @inferred(
vload(
stridedpointer(A'),
VectorizationBase.Unroll{2,1,W,1,W}((1, 1))
)
)
@test vu === @inferred(
vload(stridedpointer(A), VectorizationBase.Unroll{1,1,W,2,W}((1, 1)))
)
vstore!(
stridedpointer(B),
vu,
VectorizationBase.Unroll{2,1,W,1,W}((1, 1))
)
end
@test A == B'
W >>= 1
end
W = 2W64
while W > 1
A = rand(Float32, W, W)
B = similar(A)
GC.@preserve A B begin
vut = @inferred(
vload(stridedpointer(A), VectorizationBase.Unroll{2,1,W,1,W}((1, 1)))
)
vu = @inferred(VectorizationBase.transpose_vecunroll(vut))
@test vu === @inferred(
vload(
stridedpointer(A'),
VectorizationBase.Unroll{2,1,W,1,W}((1, 1))
)
)
@test vu === @inferred(
vload(stridedpointer(A), VectorizationBase.Unroll{1,1,W,2,W}((1, 1)))
)
vstore!(
stridedpointer(B),
vu,
VectorizationBase.Unroll{2,1,W,1,W}((1, 1))
)
end
@test A == B'
W >>= 1
end
end
println("Unary Functions")
@time @testset "Unary Functions" begin
for T ∈ (Float32, Float64)
for f ∈ [floatmin, floatmax, typemin, typemax]
@test f(Vec{Int(pick_vector_width(T)),T}) ===
Vec(ntuple(_ -> f(T), pick_vector_width(T))...)
end
v = let W = VectorizationBase.pick_vector_width(T)
VectorizationBase.VecUnroll((
Vec(ntuple(_ -> (randn(T)), W)...),
Vec(ntuple(_ -> (randn(T)), W)...),
Vec(ntuple(_ -> (randn(T)), W)...)
))
end
x = tovector(v)
for f ∈ [
-,
abs,
inv,
floor,
ceil,
trunc,
round,
VectorizationBase.relu,
abs2,
Base.FastMath.abs2_fast,
Base.FastMath.sub_fast,
sign
]
# @show T, f
@test tovector(@inferred(f(v))) == map(f, x)
end
# test fallbacks
for (vf, bf) ∈ [
(VectorizationBase.vinv, inv),
(VectorizationBase.vabs, abs),
(VectorizationBase.vround, round),
(VectorizationBase.vsub, -),
(VectorizationBase.vsub_fast, Base.FastMath.sub_fast)
]
for i ∈ -5:5
@test vf(i) == bf(i)
end
for i ∈ -3.0:0.1:3.0
@test vf(i) == bf(i)
end
end
vxabs = abs(v * 1000)
vxabsvec = tovector(vxabs)
@test tovector(exponent(vxabs)) == exponent.(vxabsvec)
@test tovector(significand(vxabs)) == significand.(vxabsvec)
@test tovector(exponent(inv(vxabs))) == exponent.(inv.(vxabsvec))
@test tovector(significand(inv(vxabs))) == significand.(inv.(vxabsvec))
@test v^2 === v^StaticInt(2) === v * v
@test v^3 === v^StaticInt(3) === (v * v) * v
@test v^4 === v^StaticInt(4) === abs2(abs2(v))
@test v^5 === v^StaticInt(5) === abs2(abs2(v)) * v
@test v^6 === v^StaticInt(6) === abs2(abs2(v)) * abs2(v)
# Don't require exact, but `eps(T)` seems like a reasonable `rtol`, at least on AVX512 systems:
# function relapprox(x::AbstractVector{T},y) where {T}
# t = max(norm(x),norm(y)) * eps(T)
# n = norm(x .- y)
# n / t
# end
# function randapprox(::Type{T}) where {T}
# x = Vec(ntuple(_ -> 10randn(T), VectorizationBase.pick_vector_width(T))...)
# via = @fastmath inv(x)
# vir = inv(x)
# relapprox(tovector(via), tovector(vir))
# end
# f32t = map(_ -> randapprox(Float32), 1:1_000_000);
# f64t = map(_ -> randapprox(Float64), 1:1_000_000);
# summarystats(f64t)
# summarystats(f32t)
# for now, I'll use `4eps(T)` if the systems don't have AVX512, but should check to set a stricter bound.
# also put `sqrt ∘ abs` in here
let rtol =
eps(T) *
      (Bool(VectorizationBase.has_feature(Val(:x86_64_avx512f))) ? 1 : 4) # AVX512 is more accurate
@test isapprox(
tovector(@inferred(Base.FastMath.inv_fast(v))),
map(Base.FastMath.inv_fast, x),
rtol = rtol
)
let f = sqrt ∘ abs
if T === Float32
@test isapprox(tovector(@inferred(f(v))), map(f, x), rtol = rtol)
elseif T === Float64 # exact with `Float64`
@test tovector(@inferred(f(v))) == map(f, x)
end
end
end
for f ∈ [floor, ceil, trunc, round]
@test tovector(@inferred(f(Int32, v))) == map(y -> f(Int32, y), x)
@test tovector(@inferred(f(Int64, v))) == map(y -> f(Int64, y), x)
end
invtol =
Bool(VectorizationBase.has_feature(Val(:x86_64_avx512f))) ? 2^-14 :
      1.5 * 2^-12 # more accurate with AVX512
@test isapprox(
tovector(@inferred(VectorizationBase.inv_approx(v))),
map(VectorizationBase.inv_approx, x),
rtol = invtol
)
end
int =
Bool(VectorizationBase.has_feature(Val(:x86_64_avx512dq))) ? Int : Int32
int2 = Bool(VectorizationBase.has_feature(Val(:x86_64_avx2))) ? Int : Int32
vi =
VectorizationBase.VecUnroll((
Vec(ntuple(_ -> rand(int), Val(W64))...),
Vec(ntuple(_ -> rand(int), Val(W64))...),
Vec(ntuple(_ -> rand(int), Val(W64))...)
)) % int2
xi = tovector(vi)
for f ∈ [-, abs, inv, floor, ceil, trunc, round, sqrt ∘ abs, sign]
@test tovector(@inferred(f(vi))) == map(f, xi)
end
let rtol =
eps(Float64) *
      (Bool(VectorizationBase.has_feature(Val(:x86_64_avx512f))) ? 1 : 4) # AVX512 is more accurate
@test isapprox(
tovector(@inferred(Base.FastMath.inv_fast(vi))),
map(Base.FastMath.inv_fast, xi),
rtol = rtol
)
end
# vpos = VectorizationBase.VecUnroll((
# Vec(ntuple(_ -> Core.VecElement(rand()), Val(W64))),
# Vec(ntuple(_ -> Core.VecElement(rand()), Val(W64))),
# Vec(ntuple(_ -> Core.VecElement(rand()), Val(W64)))
# ))
# for f ∈ [sqrt]
# @test tovector(f(vpos)) == map(f, tovector(vpos))
# end
@test getindex([2, 4, 6, 8], Vec(1, 2, 3, 4)) === Vec(2, 4, 6, 8)
@test searchsortedlast([1, 2, 4, 5, 5, 7], Vec(4, 5, 3, 0)) ===
Vec(3, 5, 2, 0)
@test searchsortedlast(
[1, 2, 4, 5, 5, 7],
Vec(4, 5, 3, 0),
Vec(2, 2, 1, 1),
Vec(4, 6, 3, 6),
Base.Order.Forward
) === Vec(3, 5, 2, 0)
end
println("Binary Functions")
@time @testset "Binary Functions" begin
# TODO: finish getting these tests to pass
# for I1 ∈ (Int32,Int64,UInt32,UInt64), I2 ∈ (Int32,Int64,UInt32,UInt64)
for (vf, bf, testfloat) ∈ [
(VectorizationBase.vadd, +, true),
(VectorizationBase.vadd_fast, Base.FastMath.add_fast, true),
(VectorizationBase.vadd_nsw, +, false),#(VectorizationBase.vadd_nuw,+,false),(VectorizationBase.vadd_nw,+,false),
(VectorizationBase.vsub, -, true),
(VectorizationBase.vsub_fast, Base.FastMath.sub_fast, true),
(VectorizationBase.vsub_nsw, -, false),#(VectorizationBase.vsub_nuw,-,false),(VectorizationBase.vsub_nw,-,false),
(VectorizationBase.vmul, *, true),
(VectorizationBase.vmul_fast, Base.FastMath.mul_fast, true),
(VectorizationBase.vmul_nsw, *, false),#(VectorizationBase.vmul_nuw,*,false),(VectorizationBase.vmul_nw,*,false),
(VectorizationBase.vrem, %, false),
(VectorizationBase.vrem_fast, %, false)
]
for i ∈ -10:10, j ∈ -6:6
((j == 0) && (bf === %)) && continue
@test vf(i % Int8, j % Int8) == bf(i % Int8, j % Int8)
@test vf(i % UInt8, j % UInt8) == bf(i % UInt8, j % UInt8)
@test vf(i % Int16, j % Int16) == bf(i % Int16, j % Int16)
@test vf(i % UInt16, j % UInt16) == bf(i % UInt16, j % UInt16)
@test vf(i % Int32, j % Int32) == bf(i % Int32, j % Int32)
@test vf(i % UInt32, j % UInt32) == bf(i % UInt32, j % UInt32)
@test vf(i % Int64, j % Int64) == bf(i % Int64, j % Int64)
@test vf(i % UInt64, j % UInt64) == bf(i % UInt64, j % UInt64)
@test vf(i % Int128, j % Int128) == bf(i % Int128, j % Int128)
@test vf(i % UInt128, j % UInt128) == bf(i % UInt128, j % UInt128)
end
if testfloat
for i ∈ -1.5:0.39:1.8, j ∈ -3:0.09:3.0
# `===` for `NaN` to pass
@test vf(i, j) === bf(i, j)
@test vf(Float32(i), Float32(j)) === bf(Float32(i), Float32(j))
end
end
end
    for i ∈ -1.5:0.379:1.8, j ∈ -3:0.089:3.0
@test VectorizationBase.vdiv(i, j) ==
VectorizationBase.vdiv_fast(i, j) ==
1e2i ÷ 1e2j
@test VectorizationBase.vdiv(Float32(i), Float32(j)) ==
VectorizationBase.vdiv_fast(Float32(i), Float32(j)) ==
Float32(1.0f2i) ÷ Float32(1.0f2j)
vr64_ref = 1e-2 * (1e2i % 1e2j)
@test VectorizationBase.vrem(i, j) ≈ vr64_ref atol = 1e-16 rtol = 1e-13
@test VectorizationBase.vrem_fast(i, j) ≈ vr64_ref atol = 1e-16 rtol =
1e-13
vr32_ref = 1.0f-2 * (Float32(1.0f2i) % Float32(1.0f2j))
@test VectorizationBase.vrem(Float32(i), Float32(j)) ≈ vr32_ref atol =
1.0f-7 rtol = 2.0f-5
@test VectorizationBase.vrem_fast(Float32(i), Float32(j)) ≈ vr32_ref atol =
1.0f-7 rtol = 2.0f-5
    end
let WI = Int(VectorizationBase.pick_vector_width(Int64))
for I1 ∈ (Int32, Int64), I2 ∈ (Int32, Int64, UInt32)
# TODO: No longer skip these either.
sizeof(I1) > sizeof(I2) && continue
vi1 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> Core.VecElement(rand(I1)), Val(WI))),
Vec(ntuple(_ -> Core.VecElement(rand(I1)), Val(WI))),
Vec(ntuple(_ -> Core.VecElement(rand(I1)), Val(WI))),
Vec(ntuple(_ -> Core.VecElement(rand(I1)), Val(WI)))
))
srange =
one(I2):(Bool(VectorizationBase.has_feature(Val(:x86_64_avx512dq))) ?
I2(8sizeof(I1) - 1) : I2(31))
vi2 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> Core.VecElement(rand(srange)), Val(WI))),
Vec(ntuple(_ -> Core.VecElement(rand(srange)), Val(WI))),
Vec(ntuple(_ -> Core.VecElement(rand(srange)), Val(WI))),
Vec(ntuple(_ -> Core.VecElement(rand(srange)), Val(WI)))
))
i = rand(srange)
j = rand(I1)
m1 = VectorizationBase.VecUnroll((
MM{WI}(I1(7)),
MM{WI}(I1(1)),
MM{WI}(I1(13)),
MM{WI}(I1(32 % last(srange)))
))
m2 = VectorizationBase.VecUnroll((
MM{WI,2}(I2(3)),
MM{WI,2}(I2(8)),
MM{WI,2}(I2(39 % last(srange))),
MM{WI,2}(I2(17))
))
@test typeof(m1 + I1(11)) === typeof(m1)
@test typeof(m2 + I2(11)) === typeof(m2)
xi1 = tovector(vi1)
xi2 = tovector(vi2)
xi3 = mapreduce(tovector, vcat, VectorizationBase.data(m1))
xi4 = mapreduce(tovector, vcat, VectorizationBase.data(m2))
I3 = promote_type(I1, I2)
# I4 = sizeof(I1) < sizeof(I2) ? I1 : (sizeof(I1) > sizeof(I2) ? I2 : I3)
for f ∈ [
+,
-,
*,
÷,
/,
%,
<<,
>>,
>>>,
⊻,
&,
|,
fld,
mod,
VectorizationBase.rotate_left,
VectorizationBase.rotate_right,
copysign,
maxi,
mini,
maxi_fast,
mini_fast
]
# for f ∈ [+, -, *, div, ÷, /, rem, %, <<, >>, >>>, ⊻, &, |, fld, mod, VectorizationBase.rotate_left, VectorizationBase.rotate_right, copysign, max, min]
# @show f, I1, I2
# if (!Bool(VectorizationBase.has_feature(Val(:x86_64_avx512dq)))) && (f === /) && sizeof(I1) === sizeof(I2) === 8
# continue
# end
check_within_limits(
tovector(@inferred(f(vi1, vi2))),
trunc_int.(
f.(size_trunc_int.(xi1, I3), size_trunc_int.(xi2, I3)),
I3
)
)
check_within_limits(
tovector(@inferred(f(j, vi2))),
trunc_int.(
f.(size_trunc_int.(j, I3), size_trunc_int.(xi2, I3)),
I3
)
)
check_within_limits(
tovector(@inferred(f(vi1, i))),
trunc_int.(
f.(size_trunc_int.(xi1, I3), size_trunc_int.(i, I3)),
I3
)
)
check_within_limits(
tovector(@inferred(f(m1, i))),
trunc_int.(
f.(size_trunc_int.(xi3, I3), size_trunc_int.(i, I3)),
I3
)
)
check_within_limits(
tovector(@inferred(f(m1, vi2))),
trunc_int.(
f.(size_trunc_int.(xi3, I3), size_trunc_int.(xi2, I3)),
I3
)
)
check_within_limits(
tovector(@inferred(f(m1, m2))),
trunc_int.(
f.(size_trunc_int.(xi3, I3), size_trunc_int.(xi4, I3)),
I3
)
)
check_within_limits(
tovector(@inferred(f(m1, m1))),
trunc_int.(
f.(size_trunc_int.(xi3, I1), size_trunc_int.(xi3, I1)),
I1
)
)
check_within_limits(
tovector(@inferred(f(m2, i))),
trunc_int.(
f.(size_trunc_int.(xi4, I3), size_trunc_int.(i, I3)),
I2
)
)
check_within_limits(
tovector(@inferred(f(m2, vi2))),
trunc_int.(
f.(size_trunc_int.(xi4, I3), size_trunc_int.(xi2, I3)),
I2
)
)
check_within_limits(
tovector(@inferred(f(m2, m2))),
trunc_int.(
f.(size_trunc_int.(xi4, I3), size_trunc_int.(xi4, I3)),
I2
)
)
check_within_limits(
tovector(@inferred(f(m2, m1))),
trunc_int.(
f.(size_trunc_int.(xi4, I3), size_trunc_int.(xi3, I3)),
I3
)
)
if !(
(f === VectorizationBase.rotate_left) ||
(f === VectorizationBase.rotate_right)
)
check_within_limits(
tovector(@inferred(f(j, m1))),
trunc_int.(f.(j, xi3), I1)
)
# @show 12
# check_within_limits(tovector(@inferred(f(j, m2))), trunc_int.(f.(size_trunc_int.(j, I1), size_trunc_int.(xi4, I1)), I1));
end
end
@test tovector(@inferred((vi1 % 17)^(i % 15))) ≈
Float64.(xi1 .% 17) .^ (i % 15)
@test @inferred(
VectorizationBase.vall(
@inferred(1 - MM{WI}(1)) == (1 - Vec(ntuple(identity, Val(WI))...))
)
)
end
vf1 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> Core.VecElement(randn()), Val(WI))),
Vec(ntuple(_ -> Core.VecElement(randn()), Val(WI)))
))
vf2 = Vec(ntuple(_ -> Core.VecElement(randn()), Val(WI)))
@test vf2 * 1 // 2 === vf2 * 0.5 === vf2 / 2
xf1 = tovector(vf1)
xf2 = tovector(vf2)
xf22 = vcat(xf2, xf2)
a = randn()
for f ∈ [
+,
-,
*,
/,
%,
max,
min,
copysign,
rem,
Base.FastMath.max_fast,
Base.FastMath.min_fast,
Base.FastMath.div_fast,
Base.FastMath.rem_fast,
Base.FastMath.hypot_fast
]
# @show f
@test tovector(@inferred(f(vf1, vf2))) ≈ f.(xf1, xf22)
@test tovector(@inferred(f(a, vf1))) ≈ f.(a, xf1)
@test tovector(@inferred(f(a, vf2))) ≈ f.(a, xf2)
@test tovector(@inferred(f(vf1, a))) ≈ f.(xf1, a)
@test tovector(@inferred(f(vf2, a))) ≈ f.(xf2, a)
end
vi2 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> Core.VecElement(rand(1:M-1)), Val(WI))),
Vec(ntuple(_ -> Core.VecElement(rand(1:M-1)), Val(WI))),
Vec(ntuple(_ -> Core.VecElement(rand(1:M-1)), Val(WI))),
Vec(ntuple(_ -> Core.VecElement(rand(1:M-1)), Val(WI)))
))
vones, vi2f, vtwos = promote(1.0, vi2, 2.0f0) # `promote` is defined pairwise; check that the three-argument form promotes consistently
@test vones === VectorizationBase.VecUnroll((
vbroadcast(Val(WI), 1.0),
vbroadcast(Val(WI), 1.0),
vbroadcast(Val(WI), 1.0),
vbroadcast(Val(WI), 1.0)
))
@test vtwos === VectorizationBase.VecUnroll((
vbroadcast(Val(WI), 2.0),
vbroadcast(Val(WI), 2.0),
vbroadcast(Val(WI), 2.0),
vbroadcast(Val(WI), 2.0)
))
@test VectorizationBase.vall(VectorizationBase.collapse_and(vi2f == vi2))
W32 = StaticInt(WI) * StaticInt(2)
vf2 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> Core.VecElement(randn(Float32)), W32)),
Vec(ntuple(_ -> Core.VecElement(randn(Float32)), W32))
))
vones32, v2f32, vtwos32 = promote(1.0, vf2, 2.0f0) # `promote` is defined pairwise; check that the three-argument form promotes consistently
if VectorizationBase.simd_integer_register_size() ==
VectorizationBase.register_size()
@test vones32 ===
VectorizationBase.VecUnroll((
vbroadcast(W32, 1.0f0),
vbroadcast(W32, 1.0f0)
)) ===
VectorizationBase.VecUnroll((
vbroadcast(W32, Float16(1)),
vbroadcast(W32, Float16(1))
))
@test vtwos32 ===
VectorizationBase.VecUnroll((
vbroadcast(W32, 2.0f0),
vbroadcast(W32, 2.0f0)
)) ===
VectorizationBase.VecUnroll((
vbroadcast(W32, Float16(2)),
vbroadcast(W32, Float16(2))
))
@test vf2 === v2f32
else
@test vones32 === VectorizationBase.VecUnroll((
vbroadcast(W32, 1.0),
vbroadcast(W32, 1.0)
))
@test vtwos32 === VectorizationBase.VecUnroll((
vbroadcast(W32, 2.0),
vbroadcast(W32, 2.0)
))
@test convert(Float64, vf2) === v2f32
end
vtwosf16 = convert(Float16, vtwos32)
@test vtwosf16 isa VectorizationBase.VecUnroll{
1,
Int(W32),
Float16,
Vec{Int(W32),Float16}
}
vtwosf32 = convert(Float32, vtwos32)
@test vtwosf32 isa VectorizationBase.VecUnroll{
1,
Int(W32),
Float32,
Vec{Int(W32),Float32}
}
@test promote(vtwosf16, vtwosf16) === (vtwosf32, vtwosf32)
@test vtwosf16 + vtwosf16 === vtwosf32 + vtwosf32
i = rand(1:31)
m1 = VectorizationBase.VecUnroll((
MM{WI}(7),
MM{WI}(1),
MM{WI}(13),
MM{WI}(18)
))
@test tovector(clamp(m1, 2:i)) == clamp.(tovector(m1), 2, i)
@test tovector(mod(m1, 1:i)) == mod1.(tovector(m1), i)
@test VectorizationBase.vdivrem.(1:30, (1:30)') == divrem.(1:30, (1:30)')
@test VectorizationBase.vcld.(1:30, (1:30)') == cld.(1:30, (1:30)')
@test VectorizationBase.vrem.(1:30, (1:30)') == rem.(1:30, (1:30)')
@test gcd(Vec(42, 64, 0, -37), Vec(18, 96, -38, 0)) === Vec(6, 32, 38, 37)
@test lcm(Vec(24, 16, 42, 0), Vec(18, 12, 18, 17)) === Vec(72, 48, 126, 0)
end
if VectorizationBase.simd_integer_register_size() ≥ 16
@test VecUnroll((
Vec(ntuple(Int32, Val(4))...),
Vec(ntuple(Int32 ∘ Base.Fix2(+, 4), Val(4))...)
)) << Vec(0x01, 0x02, 0x03, 0x04) === VecUnroll((
Vec(map(Int32, (2, 8, 24, 64))...),
Vec(map(Int32, (10, 24, 56, 128))...)
))
end
@test VectorizationBase.vdiv_fast(VecUnroll((11, 12, 13, 14)), 3) ===
VecUnroll((11, 12, 13, 14)) ÷ 3 ===
VecUnroll((3, 4, 4, 4))
@test VectorizationBase.vand(true, true) == true
@test VectorizationBase.vand(false, false) ==
VectorizationBase.vand(false, true) ==
VectorizationBase.vand(true, false) ==
false
@test VectorizationBase.vor(true, true) ==
VectorizationBase.vor(false, true) ==
VectorizationBase.vor(true, false) ==
true
@test VectorizationBase.vor(false, false) == false
@test VectorizationBase.vxor(false, true) ==
VectorizationBase.vxor(true, false) ==
true
@test VectorizationBase.vxor(false, false) ==
VectorizationBase.vxor(true, true) ==
false
end
println("Ternary Functions")
@time @testset "Ternary Functions" begin
for T ∈ (Float32, Float64)
v1, v2, v3, m = let W = @inferred(VectorizationBase.pick_vector_width(T))
v1 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> randn(T), W)...),
Vec(ntuple(_ -> randn(T), W)...)
))
v2 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> randn(T), W)...),
Vec(ntuple(_ -> randn(T), W)...)
))
v3 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> randn(T), W)...),
Vec(ntuple(_ -> randn(T), W)...)
))
_W = Int(@inferred(VectorizationBase.pick_vector_width(T)))
m = VectorizationBase.VecUnroll((
Mask{_W}(rand(UInt16)),
Mask{_W}(rand(UInt16))
))
v1, v2, v3, m
end
x1 = tovector(v1)
x2 = tovector(v2)
x3 = tovector(v3)
a = randn(T)
b = randn(T)
a64 = Float64(a)
b64 = Float64(b) # test promotion
mv = tovector(m)
for f ∈ [
muladd,
fma,
clamp,
VectorizationBase.vmuladd_fast,
VectorizationBase.vfma_fast,
VectorizationBase.vfmadd,
VectorizationBase.vfnmadd,
VectorizationBase.vfmsub,
VectorizationBase.vfnmsub,
VectorizationBase.vfmadd_fast,
VectorizationBase.vfnmadd_fast,
VectorizationBase.vfmsub_fast,
VectorizationBase.vfnmsub_fast,
VectorizationBase.vfmadd231,
VectorizationBase.vfnmadd231,
VectorizationBase.vfmsub231,
VectorizationBase.vfnmsub231
]
@test tovector(@inferred(f(v1, v2, v3))) ≈ map(f, x1, x2, x3)
@test tovector(@inferred(f(v1, v2, a64))) ≈ f.(x1, x2, a)
@test tovector(@inferred(f(v1, a64, v3))) ≈ f.(x1, a, x3)
@test tovector(@inferred(f(a64, v2, v3))) ≈ f.(a, x2, x3)
@test tovector(@inferred(f(v1, a64, b64))) ≈ f.(x1, a, b)
@test tovector(@inferred(f(a64, v2, b64))) ≈ f.(a, x2, b)
@test tovector(@inferred(f(a64, b64, v3))) ≈ f.(a, b, x3)
@test tovector(@inferred(VectorizationBase.ifelse(f, m, v1, v2, v3))) ≈
ifelse.(mv, f.(x1, x2, x3), x3)
@test tovector(@inferred(VectorizationBase.ifelse(f, m, v1, v2, a64))) ≈
ifelse.(mv, f.(x1, x2, a), a)
@test tovector(@inferred(VectorizationBase.ifelse(f, m, v1, a64, v3))) ≈
ifelse.(mv, f.(x1, a, x3), x3)
@test tovector(@inferred(VectorizationBase.ifelse(f, m, a64, v2, v3))) ≈
ifelse.(mv, f.(a, x2, x3), x3)
@test tovector(
@inferred(VectorizationBase.ifelse(f, m, v1, a64, b64))
) ≈ ifelse.(mv, f.(x1, a, b), b)
@test tovector(
@inferred(VectorizationBase.ifelse(f, m, a64, v2, b64))
) ≈ ifelse.(mv, f.(a, x2, b), b)
@test tovector(
@inferred(VectorizationBase.ifelse(f, m, a64, b64, v3))
) ≈ ifelse.(mv, f.(a, b, x3), x3)
end
@test tovector(@inferred(VectorizationBase.vfmaddsub(v1, v2, v3))) ≈
muladd.(x1, x2, x3 .* ifelse.(iseven.(eachindex(x1)), 1, -1))
@test tovector(@inferred(VectorizationBase.vfmsubadd(v1, v2, v3))) ≈
muladd.(x1, x2, x3 .* ifelse.(iseven.(eachindex(x1)), -1, 1))
end
let WI = Int(VectorizationBase.pick_vector_width(Int64))
vi64 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> rand(Int64), Val(WI))...),
Vec(ntuple(_ -> rand(Int64), Val(WI))...),
Vec(ntuple(_ -> rand(Int64), Val(WI))...)
))
vi32 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> rand(Int32), Val(WI))...),
Vec(ntuple(_ -> rand(Int32), Val(WI))...),
Vec(ntuple(_ -> rand(Int32), Val(WI))...)
))
xi64 = tovector(vi64)
xi32 = tovector(vi32)
@test tovector(
@inferred(VectorizationBase.ifelse(vi64 > vi32, vi64, vi32))
) == ifelse.(xi64 .> xi32, xi64, xi32)
@test tovector(
@inferred(VectorizationBase.ifelse(vi64 < vi32, vi64, vi32))
) == ifelse.(xi64 .< xi32, xi64, xi32)
@test tovector(@inferred(VectorizationBase.ifelse(true, vi64, vi32))) ==
ifelse.(true, xi64, xi32)
@test tovector(@inferred(VectorizationBase.ifelse(false, vi64, vi32))) ==
ifelse.(false, xi64, xi32)
vu64_1 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> rand(UInt64), Val(WI))...),
Vec(ntuple(_ -> rand(UInt64), Val(WI))...),
Vec(ntuple(_ -> rand(UInt64), Val(WI))...)
))
vu64_2 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> rand(UInt64), Val(WI))...),
Vec(ntuple(_ -> rand(UInt64), Val(WI))...),
Vec(ntuple(_ -> rand(UInt64), Val(WI))...)
))
vu64_3 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> rand(UInt64), Val(WI))...),
Vec(ntuple(_ -> rand(UInt64), Val(WI))...),
Vec(ntuple(_ -> rand(UInt64), Val(WI))...)
))
xu1 = tovector(vu64_1)
xu2 = tovector(vu64_2)
xu3 = tovector(vu64_3)
for f ∈ [
clamp,
muladd,
VectorizationBase.ifmalo,
VectorizationBase.ifmahi,
VectorizationBase.vfmadd,
VectorizationBase.vfnmadd,
VectorizationBase.vfmsub,
VectorizationBase.vfnmsub
]
@test tovector(@inferred(f(vu64_1, vu64_2, vu64_3))) ==
f.(xu1, xu2, xu3)
end
end
end
println("Special functions")
@time @testset "Special functions" begin
if VERSION ≥ v"1.6.0-DEV.674" &&
Bool(VectorizationBase.has_feature(Val(Symbol("x86_64_sse4.1"))))
erfs = [
0.1124629160182849,
0.22270258921047847,
0.3286267594591274,
0.42839235504666845,
0.5204998778130465,
0.6038560908479259,
0.6778011938374184,
0.7421009647076605,
0.7969082124228322,
0.8427007929497149,
0.8802050695740817,
0.9103139782296353,
0.9340079449406524,
0.9522851197626487,
0.9661051464753108,
0.976348383344644,
0.9837904585907745,
0.9890905016357308,
0.9927904292352575,
0.9953222650189527,
0.997020533343667,
0.9981371537020182,
0.9988568234026434,
0.999311486103355,
0.999593047982555,
0.9997639655834707,
0.9998656672600594,
0.9999249868053346,
0.9999589021219005,
0.9999779095030014,
0.9999883513426328,
0.9999939742388483
]
if Bool(VectorizationBase.has_feature(Val(:x86_64_avx512f)))
v = VectorizationBase.verf(Vec{8,Float64}(0.1:0.1:0.8...))
@test [v(i) for i = 1:8] ≈ erfs[1:8]
v = VectorizationBase.verf(Vec{16,Float32}(0.1:0.1:1.6...))
@test [v(i) for i = 1:16] ≈ erfs[1:16]
end
if Bool(VectorizationBase.has_feature(Val(:x86_64_avx)))
v = VectorizationBase.verf(Vec{4,Float64}(0.1:0.1:0.4...))
@test [v(i) for i = 1:4] ≈ erfs[1:4]
v = VectorizationBase.verf(Vec{8,Float32}(0.1:0.1:0.8...))
@test [v(i) for i = 1:8] ≈ erfs[1:8]
end
if Bool(VectorizationBase.has_feature(Val(Symbol("x86_64_sse4.1"))))
v = VectorizationBase.verf(Vec{2,Float64}(0.1:0.1:0.2...))
@test [v(i) for i = 1:2] ≈ erfs[1:2]
end
end
end
println("Non-broadcasting operations")
@time @testset "Non-broadcasting operations" begin
v1 = Vec(ntuple(_ -> Core.VecElement(randn()), Val(W64)))
vu1 = VectorizationBase.VecUnroll((
v1,
Vec(ntuple(_ -> Core.VecElement(randn()), Val(W64)))
))
v2 = Vec(ntuple(_ -> Core.VecElement(rand(-100:100)), Val(W64)))
vu2 = VectorizationBase.VecUnroll((
v2,
Vec(ntuple(_ -> Core.VecElement(rand(-100:100)), Val(W64)))
))
@test @inferred(VectorizationBase.vsum(2.3, v1)) ≈
@inferred(VectorizationBase.vsum(v1)) + 2.3 ≈
@inferred(
VectorizationBase.vsum(VectorizationBase.addscalar(v1, 2.3))
) ≈
@inferred(
VectorizationBase.vsum(VectorizationBase.addscalar(2.3, v1))
)
@test @inferred(
VectorizationBase.vsum(VectorizationBase.collapse_add(vu1))
) + 2.3 ≈
@inferred(
VectorizationBase.vsum(
VectorizationBase.collapse_add(
VectorizationBase.addscalar(vu1, 2.3)
)
)
) ≈
@inferred(
VectorizationBase.vsum(
VectorizationBase.collapse_add(
VectorizationBase.addscalar(2.3, vu1)
)
)
)
@test @inferred(VectorizationBase.vsum(v2)) + 3 ==
@inferred(
VectorizationBase.vsum(VectorizationBase.addscalar(v2, 3))
) ==
@inferred(VectorizationBase.vsum(VectorizationBase.addscalar(3, v2)))
@test @inferred(
VectorizationBase.vsum(VectorizationBase.collapse_add(vu2))
) + 3 ==
@inferred(
VectorizationBase.vsum(
VectorizationBase.collapse_add(
VectorizationBase.addscalar(vu2, 3)
)
)
) ==
@inferred(
VectorizationBase.vsum(
VectorizationBase.collapse_add(
VectorizationBase.addscalar(3, vu2)
)
)
)
@test @inferred(VectorizationBase.vprod(v1)) * 2.3 ≈
@inferred(
VectorizationBase.vprod(VectorizationBase.mulscalar(v1, 2.3))
) ≈
@inferred(
VectorizationBase.vprod(VectorizationBase.mulscalar(2.3, v1))
)
@test @inferred(VectorizationBase.vprod(v2)) * 3 ==
@inferred(VectorizationBase.vprod(VectorizationBase.mulscalar(3, v2)))
@test @inferred(
VectorizationBase.vall(v1 + v2 == VectorizationBase.addscalar(v1, v2))
)
@test 4.0 == @inferred(VectorizationBase.addscalar(2.0, 2.0))
v3 = Vec(ntuple(Base.Fix2(-, 1), W64)...)
vu3 = VectorizationBase.VecUnroll((v3, v3 - 1))
v4 = Vec(ntuple(Base.Fix2(-, 1.0), W64)...)
v5 = Vec(ntuple(Base.Fix2(-, 1.0f0), W32)...)
@test @inferred(VectorizationBase.vmaximum(v3)) === @inferred(
VectorizationBase.vmaximum(VectorizationBase.maxscalar(v3, W64 - 2))
)
@test @inferred(VectorizationBase.vmaximum(v3 % UInt)) === @inferred(
VectorizationBase.vmaximum(
VectorizationBase.maxscalar(v3 % UInt, (W64 - 2) % UInt)
)
)
@test @inferred(VectorizationBase.vmaximum(v4)) === @inferred(
VectorizationBase.vmaximum(
VectorizationBase.maxscalar(v4, prevfloat(W64 - 1.0))
)
)
@test @inferred(
VectorizationBase.vmaximum(
VectorizationBase.maxscalar(v4, nextfloat(W64 - 1.0))
)
) == nextfloat(W64 - 1.0)
@test @inferred(VectorizationBase.vmaximum(v5)) ===
@inferred(
VectorizationBase.vmaximum(
VectorizationBase.maxscalar(v5, prevfloat(W32 - 1.0f0))
)
) ===
VectorizationBase.vmaximum(
VectorizationBase.maxscalar(prevfloat(W32 - 1.0f0), v5)
)
@test @inferred(
VectorizationBase.vmaximum(
VectorizationBase.maxscalar(v5, nextfloat(W32 - 1.0f0))
)
) ==
@inferred(
VectorizationBase.vmaximum(
VectorizationBase.maxscalar(nextfloat(W32 - 1.0f0), v5)
)
) ==
nextfloat(W32 - 1.0f0)
@test VectorizationBase.maxscalar(v3, 2)(1) == 2
@test (VectorizationBase.maxscalar(v3, 2) ≠ v3) === Mask{W64}(0x01)
@test VectorizationBase.maxscalar(v3, -1) === v3
@test VectorizationBase.vmaximum(
VectorizationBase.maxscalar(v3 % UInt, -1 % UInt)
) === -1 % UInt
@test VectorizationBase.maxscalar(v4, 1e-16) ===
VectorizationBase.insertelement(v4, 1e-16, 0)
@test VectorizationBase.maxscalar(v4, -1e-16) === v4
@test VectorizationBase.vmaximum(VectorizationBase.collapse_max(vu3)) ==
W64 - 1
@test VectorizationBase.vmaximum(
VectorizationBase.collapse_max(VectorizationBase.maxscalar(vu3, W64 - 2))
) == W64 - 1
@test VectorizationBase.vmaximum(
VectorizationBase.collapse_max(VectorizationBase.maxscalar(vu3, W64))
) == W64
@test VectorizationBase.vminimum(VectorizationBase.collapse_min(vu3)) == -1
@test VectorizationBase.vminimum(
VectorizationBase.collapse_min(VectorizationBase.minscalar(vu3, 0))
) == -1
@test VectorizationBase.vminimum(
VectorizationBase.collapse_min(
VectorizationBase.minscalar(vu3, -2)
)
) ==
VectorizationBase.vminimum(
VectorizationBase.collapse_min(
VectorizationBase.minscalar(-2, vu3)
)
) ==
-2
end
println("broadcasting")
@time @testset "broadcasting" begin
@test VectorizationBase.vzero(Val(1), UInt32) ===
VectorizationBase.vzero(StaticInt(1), UInt32) ===
VectorizationBase.vzero(UInt32) ===
zero(UInt32)
@test VectorizationBase.vzero(Val(1), Int) ===
VectorizationBase.vzero(StaticInt(1), Int) ===
VectorizationBase.vzero(Int) ===
0
@test VectorizationBase.vzero(Val(1), Float32) ===
VectorizationBase.vzero(StaticInt(1), Float32) ===
VectorizationBase.vzero(Float32) ===
0.0f0
@test VectorizationBase.vzero(Val(1), Float64) ===
VectorizationBase.vzero(StaticInt(1), Float64) ===
VectorizationBase.vzero(Float64) ===
0.0
@test VectorizationBase.vzero() === VectorizationBase.vzero(W64S, Float64)
@test VectorizationBase.vbroadcast(StaticInt(2) * W64S, one(Int64)) ===
VectorizationBase.vbroadcast(StaticInt(2) * W64S, one(Int32))
@test VectorizationBase.vbroadcast(StaticInt(2) * W64S, one(UInt64)) ===
VectorizationBase.vbroadcast(StaticInt(2) * W64S, one(UInt32)) ===
one(VectorizationBase.Vec{2W64,UInt64}) ===
oneunit(VectorizationBase.Vec{2W64,UInt64})
@test VectorizationBase.vall(
VectorizationBase.vbroadcast(W64S, pointer(A)) ==
vbroadcast(W64S, first(A))
)
@test VectorizationBase.vbroadcast(W64S, pointer(A, 2)) ===
Vec{W64}(A[2]) ===
Vec(A[2])
@test zero(
VectorizationBase.VecUnroll((
VectorizationBase.vbroadcast(W64S, pointer(A)),
VectorizationBase.vbroadcast(W64S, pointer(A, 2))
))
) === VectorizationBase.VecUnroll((
VectorizationBase.vzero(W64S, Float64),
VectorizationBase.vzero()
))
@test VectorizationBase.VecUnroll{2,W64,Float64}(first(A)) ===
VectorizationBase.VecUnroll{2,W64,Float64}(
VectorizationBase.vbroadcast(W64S, pointer(A))
) ===
VectorizationBase.VecUnroll((
VectorizationBase.vbroadcast(W64S, pointer(A)),
VectorizationBase.vbroadcast(W64S, pointer(A)),
VectorizationBase.vbroadcast(W64S, pointer(A))
)) ===
VectorizationBase.VecUnroll{2}(
VectorizationBase.vbroadcast(W64S, pointer(A))
)
end
println("CartesianVIndex")
@time @testset "CartesianVIndex" begin
@test VectorizationBase.maybestaticfirst(CartesianIndices(A)) ===
VectorizationBase.CartesianVIndex(
ntuple(_ -> VectorizationBase.One(), ndims(A))
)
@test VectorizationBase.maybestaticlast(CartesianIndices(A)) ===
VectorizationBase.CartesianVIndex(size(A))
@test VectorizationBase.CartesianVIndex((
StaticInt(1),
2,
VectorizationBase.CartesianVIndex((StaticInt(4), StaticInt(7))),
CartesianIndex(12, 14),
StaticInt(2),
1
)) === VectorizationBase.CartesianVIndex((
StaticInt(1),
2,
StaticInt(4),
StaticInt(7),
12,
14,
StaticInt(2),
1
))
@test Tuple(
VectorizationBase.CartesianVIndex((
StaticInt(1),
2,
VectorizationBase.CartesianVIndex((StaticInt(4), StaticInt(7))),
CartesianIndex(12, 14),
StaticInt(2),
1
))
) === (StaticInt(1), 2, StaticInt(4), StaticInt(7), 12, 14, StaticInt(2), 1)
@test length(
VectorizationBase.CartesianVIndex((
StaticInt(1),
2,
VectorizationBase.CartesianVIndex((StaticInt(4), StaticInt(7))),
CartesianIndex(12, 14),
StaticInt(2),
1
))
) === 8
@test VectorizationBase.static_length(
VectorizationBase.CartesianVIndex((
StaticInt(1),
2,
VectorizationBase.CartesianVIndex((StaticInt(4), StaticInt(7))),
CartesianIndex(12, 14),
StaticInt(2),
1
))
) === StaticInt{8}()
@test VectorizationBase.CartesianVIndex((StaticInt(-4), StaticInt(7))):VectorizationBase.CartesianVIndex((
StaticInt(14),
StaticInt(73)
)) === CartesianIndices((
StaticInt(-4):StaticInt(14),
StaticInt(7):StaticInt(73)
))
@test VectorizationBase.maybestaticfirst(CartesianIndices(A)):VectorizationBase.maybestaticlast(
CartesianIndices(A)
) == CartesianIndices(A)
@test VectorizationBase.maybestaticfirst(CartesianIndices(A)):VectorizationBase.maybestaticlast(
CartesianIndices(A)
) === CartesianIndices(map(i -> VectorizationBase.One():i, size(A)))
end
println("Promotion")
@time @testset "Promotion" begin
vi2 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> Core.VecElement(rand(1:M-1)), Val(W64))),
Vec(ntuple(_ -> Core.VecElement(rand(1:M-1)), Val(W64))),
Vec(ntuple(_ -> Core.VecElement(rand(1:M-1)), Val(W64))),
Vec(ntuple(_ -> Core.VecElement(rand(1:M-1)), Val(W64)))
))
vones, vi2f, vtwos = @inferred(promote(1.0, vi2, 2.0f0)) # `promote` is defined pairwise; check that the three-argument form promotes and infers consistently
@test vones === VectorizationBase.VecUnroll((
vbroadcast(Val(W64), 1.0),
vbroadcast(Val(W64), 1.0),
vbroadcast(Val(W64), 1.0),
vbroadcast(Val(W64), 1.0)
))
@test vtwos === VectorizationBase.VecUnroll((
vbroadcast(Val(W64), 2.0),
vbroadcast(Val(W64), 2.0),
vbroadcast(Val(W64), 2.0),
vbroadcast(Val(W64), 2.0)
))
@test @inferred(
VectorizationBase.vall(VectorizationBase.collapse_and(vi2f == vi2))
)
vf2 = VectorizationBase.VecUnroll((
Vec(ntuple(_ -> Core.VecElement(randn(Float32)), StaticInt(W32))),
Vec(ntuple(_ -> Core.VecElement(randn(Float32)), StaticInt(W32)))
))
vones32, v2f32, vtwos32 = @inferred(promote(1.0, vf2, 2.0f0)) # `promote` is defined pairwise; check that the three-argument form promotes and infers consistently
@test vones32 === VectorizationBase.VecUnroll((
vbroadcast(StaticInt(W32), 1.0f0),
vbroadcast(StaticInt(W32), 1.0f0)
))
@test vtwos32 === VectorizationBase.VecUnroll((
vbroadcast(StaticInt(W32), 2.0f0),
vbroadcast(StaticInt(W32), 2.0f0)
))
@test vf2 === v2f32
@test isequal(tovector(@inferred(bswap(vf2))), map(bswap, tovector(vf2)))
vm = if Bool(VectorizationBase.has_feature(Val(:x86_64_avx512dq)))
VectorizationBase.VecUnroll((
MM{W64}(rand(Int)),
MM{W64}(rand(Int)),
MM{W64}(rand(Int)),
MM{W64}(rand(Int))
))
else
VectorizationBase.VecUnroll((
MM{W64}(rand(Int32)),
MM{W64}(rand(Int32)),
MM{W64}(rand(Int32)),
MM{W64}(rand(Int32))
))
end
@test tovector(@inferred(vm > vi2)) == (tovector(vm) .> tovector(vi2))
m = VecUnroll((Mask{W64}(rand(UInt)), Mask{W64}(rand(UInt))))
v64f = VecUnroll((
Vec(ntuple(_ -> randn(), Val{W64}())...),
Vec(ntuple(_ -> randn(), Val{W64}())...)
))
v32i = VecUnroll((
Vec(ntuple(_ -> rand(Int32(-100):Int32(100)), Val{W64}())...),
Vec(ntuple(_ -> rand(Int32(-100):Int32(100)), Val{W64}())...)
))
mtv = tovector(m)
v64ftv = tovector(v64f)
v32itv = tovector(v32i)
vum = @inferred(muladd(v64f, v32i, m))
@test vum isa VectorizationBase.VecUnroll
@test tovector(vum) ≈ muladd.(v64ftv, v32itv, mtv)
vum = @inferred(muladd(v64f, m, v32i))
@test vum isa VectorizationBase.VecUnroll
@test tovector(vum) ≈ muladd.(v64ftv, mtv, v32itv)
vum = @inferred(muladd(v32i, v64f, m))
@test vum isa VectorizationBase.VecUnroll
@test tovector(vum) ≈ muladd.(v32itv, v64ftv, mtv)
vum = @inferred(muladd(v32i, m, v64f))
@test vum isa VectorizationBase.VecUnroll
@test tovector(vum) ≈ muladd.(v32itv, mtv, v64ftv)
vum = @inferred(muladd(m, v64f, v32i))
@test vum isa VectorizationBase.VecUnroll
@test tovector(vum) ≈ muladd.(mtv, v64ftv, v32itv)
vum = @inferred(muladd(m, v32i, v64f))
@test vum isa VectorizationBase.VecUnroll
@test tovector(vum) ≈ muladd.(mtv, v32itv, v64ftv)
vx = convert(Vec{16,Int64}, 1)
@test typeof(vx) === typeof(zero(vx)) === Vec{16,Int64}
vxf32 = Vec(
ntuple(
_ -> randn(Float32),
VectorizationBase.pick_vector_width(Float32)
)...
)
xf32, yf32 = promote(vxf32, 1.0)
@test xf32 === vxf32
@test yf32 ===
vbroadcast(VectorizationBase.pick_vector_width(Float32), 1.0f0)
vxi32 = Vec(
ntuple(_ -> rand(Int32), VectorizationBase.pick_vector_width(Int32))...
)
xi32, yi32 = promote(vxi32, one(Int64))
@test xi32 === vxi32
@test yi32 ===
vbroadcast(VectorizationBase.pick_vector_width(Int32), one(Int32))
@test ntoh(vxi32) === Vec(map(ntoh, Tuple(vxi32))...)
@test VecUnroll((1.0, 2.0, 3.0)) * VecUnroll((1.0f0, 2.0f0, 3.0f0)) ===
VecUnroll((1.0, 4.0, 9.0))
end
println("Lazymul")
@time @testset "Lazymul" begin
# partially covered in the memory tests
for i ∈ (-5, -1, 0, 1, 4, 8), j ∈ (-5, -1, 0, 1, 4, 8)
@test @inferred(VectorizationBase.lazymul(StaticInt(i), StaticInt(j))) ===
StaticInt(i * j)
end
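# LazyMulAdd{M,O}(x) lazily represents M*x + O, so index arithmetic can fold
# scalings and offsets into addressing computations before materialization;
# e.g. below, 8*MM{8}(16) + 2*240 folds to 2*MM{8,4}(240) + 128.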
fi = VectorizationBase.LazyMulAdd{8,0}(MM{8}(StaticInt(16)))
si = VectorizationBase.LazyMulAdd{2}(240)
@test @inferred(VectorizationBase.vadd_nsw(fi, si)) ===
VectorizationBase.LazyMulAdd{2,128}(MM{8,4}(240))
end
# TODO: Put something here.
# @time @testset "Arch Functions" begin
# @test VectorizationBase.dynamic_register_size() == @inferred(VectorizationBase.register_size()) == @inferred(VectorizationBase.register_size())
# @test VectorizationBase.dynamic_integer_register_size() == @inferred(VectorizationBase.simd_integer_register_size()) == @inferred(VectorizationBase.ssimd_integer_register_size())
# @test VectorizationBase.dynamic_register_count() == @inferred(VectorizationBase.register_count()) == @inferred(VectorizationBase.sregister_count())
# @test VectorizationBase.dynamic_fma_fast() == VectorizationBase.fma_fast()
# @test VectorizationBase.dynamic_has_opmask_registers() == VectorizationBase.has_opmask_registers()
# end
println("Static Zero and One")
@time @testset "Static Zero and One" begin
vx = randnvec(W64)
vu = VectorizationBase.VecUnroll((vx, randnvec(W64)))
vm = MM{16}(24)
for f ∈ [+, Base.FastMath.add_fast]
@test f(vx, VectorizationBase.Zero()) ===
f(VectorizationBase.Zero(), vx) ===
vx
@test f(vu, VectorizationBase.Zero()) ===
f(VectorizationBase.Zero(), vu) ===
vu
@test f(vm, VectorizationBase.Zero()) ===
f(VectorizationBase.Zero(), vm) ===
vm
end
for f ∈ [-, Base.FastMath.sub_fast]
@test f(vx, VectorizationBase.Zero()) === vx
@test f(VectorizationBase.Zero(), vx) === -vx
@test f(vu, VectorizationBase.Zero()) === vu
@test f(VectorizationBase.Zero(), vu) === -vu
@test f(vm, VectorizationBase.Zero()) === vm
@test f(VectorizationBase.Zero(), vm) === -vm
end
for f ∈ [*, Base.FastMath.mul_fast]
@test f(vx, VectorizationBase.Zero()) ===
f(VectorizationBase.Zero(), vx) ===
VectorizationBase.Zero()
@test f(vu, VectorizationBase.Zero()) ===
f(VectorizationBase.Zero(), vu) ===
VectorizationBase.Zero()
@test f(vm, VectorizationBase.Zero()) ===
f(VectorizationBase.Zero(), vm) ===
VectorizationBase.Zero()
@test f(vx, VectorizationBase.One()) ===
f(VectorizationBase.One(), vx) ===
vx
@test f(vu, VectorizationBase.One()) ===
f(VectorizationBase.One(), vu) ===
vu
@test f(vm, VectorizationBase.One()) ===
f(VectorizationBase.One(), vm) ===
vm
end
vnan = NaN * vx
for f ∈ [
fma,
muladd,
VectorizationBase.vfma_fast,
VectorizationBase.vmuladd_fast
]
@test f(vnan, VectorizationBase.Zero(), vx) === vx
@test f(VectorizationBase.Zero(), vnan, vx) === vx
end
end
@inline function vlog(x1::VectorizationBase.AbstractSIMD{W,Float64}) where {W} # test that an assorted mix of operations composes correctly end-to-end
x2 = reinterpret(UInt64, x1)
x3 = x2 >>> 0x0000000000000020
greater_than_zero = x1 > zero(x1)
alternative = VectorizationBase.ifelse(x1 == zero(x1), -Inf, NaN)
isinf = x1 == Inf
x5 = x3 + 0x0000000000095f62
x6 = x5 >>> 0x0000000000000014
x7 = x6 - 0x00000000000003ff
x8 = convert(Float64, x7 % Int)
x9 = x5 << 0x0000000000000020
x10 = x9 & 0x000fffff00000000
x11 = x10 + 0x3fe6a09e00000000
x12 = x2 & 0x00000000ffffffff
x13 = x11 | x12
x14 = reinterpret(Float64, x13)
x15 = x14 - 1.0
x16 = x15 * x15
x17 = 0.5 * x16
x18 = x14 + 1.0
x19 = x15 / x18
x20 = x19 * x19
x21 = x20 * x20
x22 = vfmadd(x21, 0.15313837699209373, 0.22222198432149784)
x23 = vfmadd(x21, x22, 0.3999999999940942)
x24 = x23 * x21
x25 = vfmadd(x21, 0.14798198605116586, 0.1818357216161805)
x26 = vfmadd(x21, x25, 0.2857142874366239)
x27 = vfmadd(x21, x26, 0.6666666666666735)
x28 = x27 * x20
x29 = x24 + x17
x30 = x29 + x28
x31 = x8 * 1.9082149292705877e-10
x32 = vfmadd(x19, x30, x31)
x33 = x15 - x17
x34 = x33 + x32
x35 = vfmadd(x8, 0.6931471803691238, x34)
x36 = VectorizationBase.ifelse(greater_than_zero, x35, alternative)
VectorizationBase.ifelse(isinf, Inf, x36)
end
println("Defining log")
@time @testset "Defining log." begin
vx = Vec(
ntuple(
_ -> rand(),
VectorizationBase.StaticInt(3) *
VectorizationBase.pick_vector_width(Float64)
)...
)
check_within_limits(tovector(@inferred(vlog(vx))), log.(tovector(vx)))
end
println("Saturated add")
@time @testset "Saturated add" begin
@test VectorizationBase.saturated_add(0xf0, 0xf0) === 0xff
@test VectorizationBase.saturated_add(
2_000_000_000 % Int32,
1_000_000_000 % Int32
) === typemax(Int32)
v = Vec(
ntuple(
_ -> rand(typemax(UInt)>>1+one(UInt):typemax(UInt)),
VectorizationBase.pick_vector_width(UInt)
)...
)
@test VectorizationBase.saturated_add(v, v) ===
vbroadcast(VectorizationBase.pick_vector_width(UInt), typemax(UInt))
end
println("Special Functions")
using SpecialFunctions
@time @testset "Special Functions" begin
for T ∈ [Float32, Float64]
min_non_denormal = nextfloat(
abs(
reinterpret(T, typemax(Base.uinttype(T)) & (~Base.exponent_mask(T)))
)
)
l2mnd = log2(min_non_denormal)
xx = collect(range(T(0.8) * l2mnd, T(0.8) * abs(l2mnd); length = 2^21))
test_acc(exp2, exp2, T, xx, 3)
lemnd = log(min_non_denormal)
xx .= range(T(0.8) * lemnd, T(0.8) * abs(lemnd); length = 2^21)
test_acc(exp, exp, T, xx, 3)
l10mnd = log10(min_non_denormal)
xx .= range(T(0.8) * l10mnd, T(0.8) * abs(l10mnd); length = 2^21)
test_acc(exp10, exp10, T, xx, 3)
if T === Float32
xx .= range(-4.0f0, 4.0f0; length = 2^21)
erftol = 3
else
xx .= range(-6.0, 6.0; length = 2^21)
erftol = 7
end
test_acc(VectorizationBase.verf, erf, T, xx, erftol)
# xx .= exp2.(range(T(0.8)*l2mnd, T(0.8)*abs(l2mnd), length = 2^20));
# test_acc(VectorizationBase.vlog2, log2, T, xx, 7)
end
@test exp(VecUnroll((1.1, 2.3))) ===
VecUnroll((3.0041660239464334, 9.97418245481472))
@test exp(VecUnroll((1, 2))) ===
VecUnroll((2.7182818284590455, 7.3890560989306495))
end
# fix the stackoverflow error in `vmax_fast`, `vmax`, `vmin` and `vmin_fast` for floating-point types
@time @testset "fix stackoverflow for `vmax_fast` et al." begin
@test VectorizationBase.vmax_fast(1.0, 3.0) === 3.0
@test VectorizationBase.vmax_fast(1, 3) === 3
@test VectorizationBase.vmin_fast(1, 3) === 1
@test VectorizationBase.vmin_fast(1.0, 3.0) === 1.0
@test VectorizationBase.vmax(1.0, 3.0) === 3.0
@test VectorizationBase.vmax(1, 3) === 3
@test VectorizationBase.vmin(1, 3) === 1
@test VectorizationBase.vmin(1.0, 3.0) === 1.0
end
@time @testset "vmax/vmin Bool" begin
t, f = Vec{4,Bool}(true, true, false, false),
Vec{4,Bool}(true, false, true, false)
@test VectorizationBase.vmax_fast(t, f) ===
@fastmath(max(t, f)) ===
Vec{4,Bool}(true, true, true, false)
@test VectorizationBase.vmin_fast(t, f) ===
@fastmath(min(t, f)) ===
Vec{4,Bool}(true, false, false, false)
@test VectorizationBase.vmax(t, f) ===
max(t, f) ===
Vec{4,Bool}(true, true, true, false)
@test VectorizationBase.vmin(t, f) ===
min(t, f) ===
Vec{4,Bool}(true, false, false, false)
tm = Mask{4}(0xc)
fm = Mask{4}(0xa)
@test @fastmath(max(tm, fm)) === max(tm, fm) === Mask{4}(0xe)
@test @fastmath(min(tm, fm)) === min(tm, fm) === Mask{4}(0x8)
end
@time @testset "Generic strided pointer" begin
A = rand(ComplexF64, 3, 4)
x = ["hi" "howdy"; "greetings" "hello"]
GC.@preserve A x begin
@test A[2, 3] === vload(stridedpointer(A), (2, 3))
c = 123.0 - 456.0im
vstore!(stridedpointer(A), c, (3, 2))
@test A[3, 2] == c
@test x[1] === vload(stridedpointer(x), (0,))
@test x[3] === vload(stridedpointer(x), (2,))
w = "welcome!"
vstore!(stridedpointer(x), w, (1,))
@test w === x[2]
h = "hallo"
vstore!(stridedpointer(x), h, (2, 2))
@test x[2, 2] === h
@test vload(stridedpointer(x), (1, 2)) === x[1, 2]
end
end
@testset "NullStep" begin
A = rand(4, 5)
GC.@preserve A begin
@test @inferred(
vload(
VectorizationBase.gesp(
VectorizationBase.stridedpointer(A),
(VectorizationBase.NullStep(), VectorizationBase.NullStep())
),
(1, 2)
)
) == A[1, 2]
@test @inferred(
vload(
VectorizationBase.gesp(
VectorizationBase.stridedpointer(A),
(StaticInt(0), VectorizationBase.NullStep())
),
(2, 3)
)
) == A[2, 3]
@test @inferred(
vload(
VectorizationBase.gesp(
VectorizationBase.stridedpointer(A),
(VectorizationBase.NullStep(), StaticInt(0))
),
(3, 4)
)
) == A[3, 4]
end
B = A .> 0.5
spb = stridedpointer(B)
@test VectorizationBase.gesp(spb, (3,)) ===
VectorizationBase.gesp(spb, (3, 0))
end
# end
end # @testset VectorizationBase
# ptr_A = pointer(A)
# vA = VectorizationBase.stridedpointer(A)
# Att = copy(A')'
# vAtt = VectorizationBase.stridedpointer(Att)
# @test eltype(vA) == Float64
# @test Base.unsafe_convert(Ptr{Float64}, vA) === ptr_A === pointer(vA)
# @test vA == VectorizationBase.stridedpointer(vA)
# @test all(i -> A[i+1] === VectorizationBase.vload(ptr_A + 8i) === VectorizationBase.vload(vA, (i,)) === Float64(i), 0:15)
# VectorizationBase.vstore!(vA, 99.9, (3,))
# @test 99.9 === VectorizationBase.vload(ptr_A + 8*3) === VectorizationBase.vload(vA, (VectorizationBase.StaticInt(3),)) === VectorizationBase.vload(vA, (3,0)) === A[4,1]
# VectorizationBase.vstore!(vAtt, 99.9, (3,1))
# @test 99.9 === VectorizationBase.vload(vAtt, (3,1)) === VectorizationBase.vload(vAtt, (VectorizationBase.StaticInt(3),1)) === Att[4,2]
# VectorizationBase.vnoaliasstore!(ptr_A+8*4, 999.9)
# @test 999.9 === VectorizationBase.vload(ptr_A + 8*4) === VectorizationBase.vload(pointer(vA), 4*sizeof(eltype(A))) === VectorizationBase.vload(vA, (4,))
# @test vload(vA, (7,2)) == vload(vAtt, (7,2)) == A[8,3]
# @test vload(VectorizationBase.subsetview(vA, Val(1), 7), (2,)) == vload(VectorizationBase.subsetview(vAtt, Val(1), 7), (2,)) == A[8,3]
# @test vload(VectorizationBase.subsetview(vA, Val(2), 2), (7,)) == vload(VectorizationBase.subsetview(vAtt, Val(2), 2), (7,)) == A[8,3]
# @test vload(VectorizationBase.double_index(vA, Val(0), Val(1)), (2,)) == vload(VectorizationBase.double_index(vA, Val(0), Val(1)), (VectorizationBase.StaticInt(2),)) == A[3,3]
# @test vload(VectorizationBase.double_index(vAtt, Val(0), Val(1)), (1,)) == vload(VectorizationBase.double_index(vAtt, Val(0), Val(1)), (VectorizationBase.StaticInt(1),)) == A[2,2]
# B = rand(5, 5)
# vB = VectorizationBase.stridedpointer(B)
# @test vB[1, 2] == B[2, 3] == vload(VectorizationBase.stridedpointer(B, 2, 3))
# @test vB[3] == B[4] == vload(VectorizationBase.stridedpointer(B, 4))
# @test vload(Vec{4,Float64}, vB) == Vec{4,Float64}(ntuple(i->B[i], Val(4)))
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | code | 2472 | # Separate file to make it easier to include separately from the REPL for running pieces
using VectorizationBase, OffsetArrays
using VectorizationBase: data
using Test
const W64S = VectorizationBase.pick_vector_width(Float64)
const W64 = Int(VectorizationBase.register_size() ÷ sizeof(Float64))
const W32 = Int(VectorizationBase.register_size() ÷ sizeof(Float32))
const VE = Core.VecElement
randnvec(N = Val{W64}()) = Vec(ntuple(_ -> Core.VecElement(randn()), N))
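# tovector flattens SIMD containers (VecUnroll, Vec, MM, LazyMulAdd) into plain
# Vectors so vector results can be checked against scalar/broadcast references.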
function tovector(u::VectorizationBase.VecUnroll{_N,W,_T}) where {_N,W,_T}
T = _T === VectorizationBase.Bit ? Bool : _T
N = _N + 1
i = 0
x = Vector{T}(undef, N * W)
for n ∈ 1:N
v = VectorizationBase.data(u)[n]
for w ∈ 0:W-1
x[(i+=1)] = VectorizationBase.extractelement(v, w)
end
end
x
end
tovector(v::VectorizationBase.AbstractSIMDVector{W}) where {W} =
[VectorizationBase.extractelement(v, w) for w ∈ 0:W-1]
tovector(v::VectorizationBase.LazyMulAdd) =
tovector(VectorizationBase._materialize(v))
tovector(x) = x
tovector(i::MM{W,X}) where {W,X} = collect(range(data(i); step = X, length = W))
tovector(
i::MM{W,X,I}
) where {W,X,I<:Union{Int8,Int16,Int32,Int64,UInt8,UInt16,UInt32,UInt64}} =
collect(range(data(i); step = I(X), length = I(W)))
A = randn(13, 17);
L = length(A);
M, N = size(A);
trunc_int(x::Integer, ::Type{T}) where {T} = x % T
trunc_int(x, ::Type{T}) where {T} = x
size_trunc_int(x::Signed, ::Type{T}) where {T} = signed(x % T)
size_trunc_int(x::Unsigned, ::Type{T}) where {T} = unsigned(x % T)
size_trunc_int(x, ::Type{T}) where {T} = x
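# check_within_limits: without AVX512DQ, wide-integer vector results outside the
# Int32 range are not reliable, so restrict the comparison to values that fit in Int32.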
check_within_limits(x, y) = @test x ≈ y
function check_within_limits(x::Vector{T}, y) where {T<:Integer}
if Bool(VectorizationBase.has_feature(Val(:x86_64_avx512dq)))
return @test x ≈ y
end
r = typemin(Int32) .≤ y .≤ typemax(Int32)
xs = x[r]
ys = y[r]
@test xs ≈ ys
end
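# Scalar reference versions of max/min that promote mixed integer arguments the
# way the vector ops do (min promotes to signed when either argument is signed).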
maxi(a, b) = max(a, b)
mini(a, b) = min(a, b)
function maxi(a::T1, b::T2) where {T1<:Base.BitInteger,T2<:Base.BitInteger}
T = promote_type(T1, T2)
T(a > b ? a : b)
end
function mini(a::T1, b::T2) where {T1<:Base.BitInteger,T2<:Base.BitInteger}
_T = promote_type(T1, T2)
T = if T1 <: Signed || T2 <: Signed
signed(_T)
else
_T
end
T(a < b ? a : b)
end
maxi_fast(a, b) = Base.FastMath.max_fast(a, b)
mini_fast(a, b) = Base.FastMath.min_fast(a, b)
maxi_fast(a::Base.BitInteger, b::Base.BitInteger) = maxi(a, b)
mini_fast(a::Base.BitInteger, b::Base.BitInteger) = mini(a, b)
include("accuracy.jl")
nothing
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | docs | 5747 | # VectorizationBase
[](https://JuliaSIMD.github.io/VectorizationBase.jl/stable)
[](https://JuliaSIMD.github.io/VectorizationBase.jl/dev)
[](https://github.com/JuliaSIMD/VectorizationBase.jl/actions?query=workflow%3ACI)
[/badge.svg)](https://github.com/JuliaSIMD/VectorizationBase.jl/actions?query=workflow%3A%22CI+%28Julia+nightly%29%22)
[](https://codecov.io/gh/JuliaSIMD/VectorizationBase.jl)
---
# NOTE: Looking for new maintainers, otherwise deprecated in Julia 1.11
This is a library providing basic SIMD support in Julia. VectorizationBase exists in large part to serve the needs of [LoopVectorization.jl](https://github.com/JuliaSIMD/LoopVectorization.jl)'s code gen, prioritizing this over a stable user-facing API. Thus, you may wish to consider [SIMD.jl](https://github.com/eschnett/SIMD.jl) as an alternative when writing explicit SIMD code in Julia. That said, the `Vec` and `VecUnroll` types are meant to "just work" as much as possible when passed to user-defined functions, so it should be reasonably stable in practice. Other parts of the code -- e.g., loading and storing vectors as well as the `stridedpointer` function -- will hopefully converge reasonably soon, and have support for various `AbstractArray` types propagated through the ecosystem by taking advantage of [ArrayInterface.jl](https://github.com/SciML/ArrayInterface.jl), so that VectorizationBase can begin to offer a stable, ergonomic, and well-supported API fairly soon.
It additionally provides some information on the host computer it is running on, which can be used to automate target-specific optimizations. Currently, x86_64 support is best on that front, but I'm looking to improve the quality of information provided for other architectures.
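For a quick sketch of that information (the exact values are machine-dependent; the output below assumes an AVX512 host, consistent with the `Vec{8,Float64}` results further down):

```julia
julia> using VectorizationBase
julia> VectorizationBase.pick_vector_width(Float64) # Float64 lanes per SIMD vector
static(8)
julia> VectorizationBase.register_size() # SIMD register size in bytes
static(64)
```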
`Vec`s are `Number`s and behave as single objects; they just happen to contain multiple `Float64`s. Therefore, a `Vec` will behave like a single number rather than a collection with respect to indexing and reductions:
```julia
julia> using VectorizationBase
julia> vx = Vec(ntuple(_ -> 10randn(), VectorizationBase.pick_vector_width(Float64))...)
Vec{8,Float64}<14.424983437388981, -7.7378330531368045, -3.499708331670689, -3.358981392002452, 22.519898671389406, -13.08647686033593, 13.96943264299162, -9.518537139443254>
julia> vx[1]
Vec{8,Float64}<14.424983437388981, -7.7378330531368045, -3.499708331670689, -3.358981392002452, 22.519898671389406, -13.08647686033593, 13.96943264299162, -9.518537139443254>
julia> sum(vx)
Vec{8,Float64}<14.424983437388981, -7.7378330531368045, -3.499708331670689, -3.358981392002452, 22.519898671389406, -13.08647686033593, 13.96943264299162, -9.518537139443254>
julia> a = 1.2;
julia> a[1]
1.2
julia> sum(a)
1.2
```
To extract elements from a `Vec`, you call it, using parenthesis to index as you would in Fortran or MATLAB:
```julia
julia> vx(1), vx(2)
(14.424983437388981, -7.7378330531368045)
julia> ntuple(vx, Val(8))
(14.424983437388981, -7.7378330531368045, -3.499708331670689, -3.358981392002452, 22.519898671389406, -13.08647686033593, 13.96943264299162, -9.518537139443254)
julia> Tuple(vx) # defined for convenience
(14.424983437388981, -7.7378330531368045, -3.499708331670689, -3.358981392002452, 22.519898671389406, -13.08647686033593, 13.96943264299162, -9.518537139443254)
```
Unfortunately, this means no support for indexing with `begin`/`end`.
Reductions are like the ordinary version, but prefixed with `v`:
```julia
julia> using VectorizationBase: vsum, vprod, vmaximum, vminimum
julia> vsum(vx), sum(Tuple(vx))
(13.712777975180877, 13.712777975180877)
julia> vprod(vx), prod(Tuple(vx))
(-5.141765647043406e7, -5.141765647043406e7)
julia> vmaximum(vx), maximum(Tuple(vx))
(22.519898671389406, 22.519898671389406)
julia> vminimum(vx), minimum(Tuple(vx))
(-13.08647686033593, -13.08647686033593)
```
Here is an example of using `vload`:
```julia
julia> A = rand(8,8);
julia> W = VectorizationBase.pick_vector_width(Float64); # defines the `W` used below
julia> vload(stridedpointer(A), (MM(W, 1), 1))
Vec{8, Float64}<0.23659378106523243, 0.1572296679962767, 0.4139998988982545, 0.4068544124895789, 0.6365683129363592, 0.10041731176364777, 0.6198701180649783, 0.18351031426464992>
julia> A[1:W,1]'
1×8 adjoint(::Vector{Float64}) with eltype Float64:
0.236594 0.15723 0.414 0.406854 0.636568 0.100417 0.61987 0.18351
julia> vload(stridedpointer(A), (1, MM(W, 1)))
Vec{8, Float64}<0.23659378106523243, 0.43800087768259754, 0.5833216557209256, 0.8076063696863035, 0.12069215155721758, 0.6015627184700922, 0.1390837892914757, 0.9139206013822945>
julia> A[1,1:W]'
1×8 adjoint(::Vector{Float64}) with eltype Float64:
0.236594 0.438001 0.583322 0.807606 0.120692 0.601563 0.139084 0.913921
julia> vload(stridedpointer(A), (MM(W,1), MM(W, 1)))
Vec{8, Float64}<0.23659378106523243, 0.7580627352162604, 0.044776171518136954, 0.218587536875811, 0.4596625543892163, 0.2933303822991349, 0.30481677678671315, 0.3595115888246907>
julia> getindex.(Ref(A), 1:W, 1:W)'
1×8 adjoint(::Vector{Float64}) with eltype Float64:
0.236594 0.758063 0.0447762 0.218588 0.459663 0.29333 0.304817 0.359512
```
The basic idea is that you have a tuple of indices. The `MM` type indicates that it is vectorized. In the above example, we vectorize the load along columns, then rows, and then both. This is equivalent to loading the column, row, and diagonal.
Note that you can pass a `Mask` argument to mask off extra loads/stores.
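For example, here is a minimal sketch of a masked load (reusing `A` and `W` from above, and assuming `W == 8`): only the lanes selected by the mask are read, and the masked-off lanes come back as zero:

```julia
julia> m = VectorizationBase.Mask{8}(0x1f); # lanes 1:5 active
julia> vload(stridedpointer(A), (MM(W, 1), 1), m) # loads A[1:5, 1]; lanes 6:8 are 0.0
```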
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.21.70 | e7f5b81c65eb858bed630fe006837b935518aca5 | docs | 86 | # VectorizationBase.jl
```@index
```
```@autodocs
Modules = [VectorizationBase]
```
| VectorizationBase | https://github.com/JuliaSIMD/VectorizationBase.jl.git |
|
[
"MIT"
] | 0.8.0 | 852bd0f55565a9e973fcfee83a84413270224dc4 | code | 18827 | ## Interface to the Rmath library emulating the d-p-q-r functions in R.
## This module is for archival purposes. The interface in the
## Distributions module is much more effective.
module Rmath
using Rmath_jll
using Random
export dbeta,pbeta,qbeta,rbeta # Beta distribution (shape1, shape2)
export dbinom,pbinom,qbinom,rbinom # Binomial distribution (size, prob)
export dcauchy,pcauchy,qcauchy,rcauchy # Cauchy distribution (location, scale)
export dchisq,pchisq,qchisq,rchisq # Central Chi-squared distribution (df)
export dexp,pexp,qexp,rexp # Exponential distribution (rate)
export df,pf,qf,rf # Central F distribution (df1,df2)
export dgamma,pgamma,qgamma,rgamma # Gamma distribution (shape, scale)
export dgeom,pgeom,qgeom,rgeom # Geometric distribution (prob)
export dhyper,phyper,qhyper,rhyper # Hypergeometric (m, n, k)
export dlnorm,plnorm,qlnorm,rlnorm # Log-normal distribution (meanlog, sdlog)
export dlogis,plogis,qlogis,rlogis # Logistic distribution (location, scale)
export dnbeta,pnbeta,qnbeta,rnbeta # Non-central beta (shape1, shape2, ncp)
export dnbinom,pnbinom,qnbinom,rnbinom # Negative binomial distribution (size, prob)
export dnbinom_mu,pnbinom_mu,qnbinom_mu,rnbinom_mu # Negative binomial distribution (size, mu)
export dnchisq,pnchisq,qnchisq,rnchisq # Noncentral Chi-squared distribution (df, ncp)
export dnf,pnf,qnf,rnf # Non-central F (df1, df2, ncp)
export dnorm,pnorm,qnorm,rnorm # Normal (Gaussian) distribution (mu, sigma)
export dpois,ppois,qpois,rpois # Poisson distribution (lambda)
export dsignrank,psignrank,qsignrank,rsignrank
export dt,pt,qt,rt # Student's t distribution (df)
export dunif,punif,qunif,runif # Uniform distribution (min, max)
export dweibull,pweibull,qweibull,rweibull # Weibull distribution (shape, scale)
export dwilcox,pwilcox,qwilcox,rwilcox # Wilcox's Rank Sum statistic (m, n)
export ptukey, qtukey # Studentized Range Distribution - p and q only
function __init__()
# initialize RNG hooks
unsafe_store!(cglobal((:unif_rand_ptr,libRmath),Ptr{Cvoid}),
@cfunction(rand,Float64,()))
unsafe_store!(cglobal((:norm_rand_ptr,libRmath),Ptr{Cvoid}),
@cfunction(randn,Float64,()))
unsafe_store!(cglobal((:exp_rand_ptr,libRmath),Ptr{Cvoid}),
@cfunction(Random.randexp,Float64,()))
end
## Macro for deferring freeing data until GC for wilcox and signrank
macro libRmath_deferred_free(base)
libcall = Symbol(base, "_free")
func = Symbol(base, "_deferred_free")
esc(quote
let gc_tracking_obj = []
global $func
function $libcall(x::Vector)
gc_tracking_obj = []
ccall(($(string(libcall)),libRmath), Cvoid, ())
end
function $func()
if !isa(gc_tracking_obj, Bool)
finalizer($libcall, gc_tracking_obj)
gc_tracking_obj = false
end
end
end
end)
end
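## e.g. `@libRmath_deferred_free signrank` (below) defines `signrank_free(x::Vector)`,
## which calls the C-side `signrank_free` to release the library's scratch tables,
## and `signrank_deferred_free()`, which registers that cleanup as a finalizer so
## the tables are reclaimed at garbage collection rather than freed eagerly.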
## Non-ccall functions for distributions with 1 parameter and no defaults
macro libRmath_1par_0d_aliases(base)
dd = Symbol("d", base)
pp = Symbol("p", base)
qq = Symbol("q", base)
esc(quote
$dd(x::Number, p1::Number) = $dd(x, p1, false)
$pp(q::Number, p1::Number, lower_tail::Bool) = $pp(q, p1, lower_tail, false)
$pp(q::Number, p1::Number) = $pp(q, p1, true, false)
$qq(p::Number, p1::Number, lower_tail::Bool) = $qq(p, p1, lower_tail, false)
$qq(p::Number, p1::Number) = $qq(p, p1, true, false)
end)
end
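## e.g. `@libRmath_1par_0d_aliases t` (via `@libRmath_1par_0d t` below) adds the
## convenience methods `dt(x, df)`, `pt(q, df[, lower_tail])`, and
## `qt(p, df[, lower_tail])`, filling in the defaults `lower_tail = true` and
## `log_p = give_log = false`.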
## Distributions with 1 parameter and no default
macro libRmath_1par_0d(base)
dd = Symbol("d", base)
pp = Symbol("p", base)
qq = Symbol("q", base)
rr = Symbol("r", base)
esc(quote
$dd(x::Number, p1::Number, give_log::Bool) =
ccall(($(string(dd)),libRmath), Float64,
(Float64,Float64,Int32), x, p1, give_log)
$pp(q::Number, p1::Number, lower_tail::Bool, log_p::Bool) =
ccall(($(string(pp)),libRmath), Float64,
(Float64,Float64,Int32,Int32), q, p1, lower_tail, log_p)
$qq(p::Number, p1::Number, lower_tail::Bool, log_p::Bool) =
ccall(($(string(qq)),libRmath), Float64,
(Float64,Float64,Int32,Int32), p, p1, lower_tail, log_p)
$rr(nn::Integer, p1::Number) =
[ccall(($(string(rr)),libRmath), Float64, (Float64,), p1) for i=1:nn]
@libRmath_1par_0d_aliases $(base)
end)
end
@libRmath_1par_0d t
@libRmath_1par_0d chisq
@libRmath_1par_0d pois
@libRmath_1par_0d geom
## The d-p-q functions in Rmath for signrank allocate storage that must be freed
## Signrank - Wilcoxon Signed Rank statistic
@libRmath_deferred_free signrank
function dsignrank(x::Number, p1::Number, give_log::Bool)
signrank_deferred_free()
ccall((:dsignrank,libRmath), Float64, (Float64,Float64,Int32), x, p1, give_log)
end
function psignrank(q::Number, p1::Number, lower_tail::Bool, log_p::Bool)
signrank_deferred_free()
ccall((:psignrank,libRmath), Float64, (Float64,Float64,Int32,Int32), q, p1, lower_tail, log_p)
end
function qsignrank(p::Number, p1::Number, lower_tail::Bool, log_p::Bool)
signrank_deferred_free()
ccall((:qsignrank,libRmath), Float64, (Float64,Float64,Int32,Int32), p, p1, lower_tail, log_p)
end
@libRmath_1par_0d_aliases signrank
rsignrank(nn::Integer, p1::Number) =
[ccall((:rsignrank,libRmath), Float64, (Float64,), p1) for i=1:nn]
## Distributions with 1 parameter and a default
macro libRmath_1par_1d(base, d1)
dd = Symbol("d", base)
pp = Symbol("p", base)
qq = Symbol("q", base)
rr = Symbol("r", base)
esc(quote
$dd(x::Number, p1::Number, give_log::Bool) =
ccall(($(string(dd)),libRmath), Float64, (Float64,Float64,Int32), x, p1, give_log)
$dd(x::Number, give_log::Bool) = $dd(x, $d1, give_log)
$dd(x::Number, p1::Number) = $dd(x, p1, false)
$dd(x::Number) = $dd(x, $d1, false)
$pp(q::Number, p1::Number, lower_tail::Bool, log_p::Bool) =
ccall(($(string(pp)),libRmath), Float64, (Float64,Float64,Int32,Int32), q, p1, lower_tail, log_p)
$pp(q::Number, lower_tail::Bool, log_p::Bool) = $pp(q, $d1, lower_tail, log_p)
$pp(q::Number, p1::Number, lower_tail::Bool) = $pp(q, p1, lower_tail, false)
$pp(q::Number, lower_tail::Bool) = $pp(q, $d1, lower_tail, false)
$pp(q::Number, p1::Number) = $pp(q, p1, true, false)
$pp(q::Number) = $pp(q, $d1, true, false)
$qq(p::Number, p1::Number, lower_tail::Bool, log_p::Bool) =
ccall(($(string(qq)),libRmath), Float64, (Float64,Float64,Int32,Int32), p, p1, lower_tail, log_p)
$qq(p::Number, lower_tail::Bool, log_p::Bool) = $qq(p, $d1, lower_tail, log_p)
$qq(p::Number, p1::Number, lower_tail::Bool) = $qq(p, p1, lower_tail, false)
$qq(p::Number, lower_tail::Bool) = $qq(p, $d1, lower_tail, false)
$qq(p::Number, p1::Number) = $qq(p, p1, true, false)
$qq(p::Number) = $qq(p, $d1, true, false)
$rr(nn::Integer, p1::Number) =
[ccall(($(string(rr)),libRmath), Float64, (Float64,), p1) for i=1:nn]
$rr(nn::Integer) = $rr(nn, $d1)
end)
end
## May need to handle this as a special case. The Rmath library uses 1/rate, not rate
@libRmath_1par_1d exp 1 # Exponential distribution (rate)
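## e.g. `dexp(1.0, 2.0)` as defined here evaluates the density with scale 2
## (mean 2, i.e. rate 1/2), since `p1` is passed straight through to the C
## `dexp(x, scale, give_log)`; R's `dexp(x, rate)` would interpret the 2 as a rate.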
## Non-ccall functions for distributions with 2 parameters and no defaults
macro libRmath_2par_0d_aliases(base)
dd = Symbol("d", base)
pp = Symbol("p", base)
qq = Symbol("q", base)
esc(quote
$dd(x::Number, p1::Number, p2::Number) = $dd(x, p1, p2, false)
$pp(q::Number, p1::Number, p2::Number, lower_tail::Bool) = $pp(q, p1, p2, lower_tail, false)
$pp(q::Number, p1::Number, p2::Number) = $pp(q, p1, p2, true, false)
$qq(p::Number, p1::Number, p2::Number, lower_tail::Bool) = $qq(p, p1, p2, lower_tail, false)
$qq(p::Number, p1::Number, p2::Number) = $qq(p, p1, p2, true, false)
end)
end
## Distributions with 2 parameters and no defaults
macro libRmath_2par_0d(base)
dd = Symbol("d", base)
pp = Symbol("p", base)
qq = Symbol("q", base)
rr = Symbol("r", base)
esc(quote
$dd(x::Number, p1::Number, p2::Number, give_log::Bool) =
ccall(($(string(dd)),libRmath), Float64, (Float64,Float64,Float64,Int32), x, p1, p2, give_log)
$pp(q::Number, p1::Number, p2::Number, lower_tail::Bool, log_p::Bool) =
ccall(($(string(pp)),libRmath), Float64, (Float64,Float64,Float64,Int32,Int32), q, p1, p2, lower_tail, log_p)
$qq(p::Number, p1::Number, p2::Number, lower_tail::Bool, log_p::Bool) =
ccall(($(string(qq)),libRmath), Float64, (Float64,Float64,Float64,Int32,Int32), p, p1, p2, lower_tail, log_p)
$rr(nn::Integer, p1::Number, p2::Number) =
[ccall(($(string(rr)),libRmath), Float64, (Float64,Float64), p1, p2) for i=1:nn]
@libRmath_2par_0d_aliases $base
end)
end
@libRmath_2par_0d beta # Beta distribution (shape1, shape2)
@libRmath_2par_0d binom # Binomial distribution (size, prob)
@libRmath_2par_0d f # Central F distribution (df1, df2)
@libRmath_2par_0d nbinom # Negative binomial distribution (size, prob)
@libRmath_2par_0d nbinom_mu # Negative binomial distribution (size, mu)
@libRmath_2par_0d nchisq # Noncentral Chi-squared distribution (df, ncp)
## Need to handle the d-p-q for Wilcox separately because the Rmath functions allocate storage that must be freed.
## Wilcox - Wilcox's Rank Sum statistic (m, n) - probably only makes sense for positive integers
@libRmath_deferred_free wilcox
function dwilcox(x::Number, p1::Number, p2::Number, give_log::Bool)
wilcox_deferred_free()
ccall((:dwilcox,libRmath), Float64, (Float64,Float64,Float64,Int32), x, p1, p2, give_log)
end
function pwilcox(q::Number, p1::Number, p2::Number, lower_tail::Bool, log_p::Bool)
wilcox_deferred_free()
ccall((:pwilcox,libRmath), Float64, (Float64,Float64,Float64,Int32,Int32), q, p1, p2, lower_tail, log_p)
end
function qwilcox(p::Number, p1::Number, p2::Number, lower_tail::Bool, log_p::Bool)
wilcox_deferred_free()
ccall((:qwilcox,libRmath), Float64, (Float64,Float64,Float64,Int32,Int32), p, p1, p2, lower_tail, log_p)
end
rwilcox(nn::Integer, p1::Number, p2::Number) =
[ccall((:rwilcox,libRmath), Float64, (Float64,Float64), p1, p2) for i=1:nn]
@libRmath_2par_0d_aliases wilcox
## Distributions with 2 parameters and 1 default
macro libRmath_2par_1d(base, d2)
dd = Symbol("d", base)
pp = Symbol("p", base)
qq = Symbol("q", base)
rr = Symbol("r", base)
esc(quote
$dd(x::Number, p1::Number, p2::Number, give_log::Bool) =
ccall(($(string(dd)),libRmath), Float64, (Float64,Float64,Float64,Int32), x, p1, p2, give_log)
$dd(x::Number, p1::Number, give_log::Bool) = $dd(x, p1, $d2, give_log)
$dd(x::Number, p1::Number, p2::Number) = $dd(x, p1, p2, false)
$dd(x::Number, p1::Number) = $dd(x, p1, $d2, false)
$pp(q::Number, p1::Number, p2::Number, lower_tail::Bool, log_p::Bool) =
ccall(($(string(pp)),libRmath), Float64, (Float64,Float64,Float64,Int32,Int32), q, p1, p2, lower_tail, log_p)
$pp(q::Number, p1::Number, lower_tail::Bool, log_p::Bool) = $pp(q, p1, $d2, lower_tail, log_p)
$pp(q::Number, p1::Number, p2::Number, lower_tail::Bool) = $pp(q, p1, p2, lower_tail, false)
$pp(q::Number, p1::Number, lower_tail::Bool) = $pp(q, p1, $d2, lower_tail, false)
$pp(q::Number, p1::Number, p2::Number) = $pp(q, p1, p2, true, false)
$pp(q::Number, p1::Number) = $pp(q, p1, $d2, true, false)
$qq(p::Number, p1::Number, p2::Number, lower_tail::Bool, log_p::Bool) =
ccall(($(string(qq)),libRmath), Float64, (Float64,Float64,Float64,Int32,Int32), p, p1, p2, lower_tail, log_p)
$qq(p::Number, p1::Number, lower_tail::Bool, log_p::Bool) = $qq(p, p1, $d2, lower_tail, log_p)
$qq(p::Number, p1::Number, p2::Number, lower_tail::Bool) = $qq(p, p1, p2, lower_tail, false)
$qq(p::Number, p1::Number, lower_tail::Bool) = $qq(p, p1, $d2, lower_tail, false)
$qq(p::Number, p1::Number, p2::Number) = $qq(p, p1, p2, true, false)
$qq(p::Number, p1::Number) = $qq(p, p1, $d2, true, false)
$rr(nn::Integer, p1::Number, p2::Number) =
[ccall(($(string(rr)),libRmath), Float64, (Float64,Float64), p1, p2) for i=1:nn]
$rr(nn::Integer, p1::Number) = $rr(nn, p1, $d2)
end)
end
@libRmath_2par_1d gamma 1 # Gamma distribution (shape, scale)
@libRmath_2par_1d weibull 1 # Weibull distribution (shape, scale)
## Distributions with 2 parameters and 2 defaults
macro libRmath_2par_2d(base, d1, d2)
ddsym = dd = Symbol("d", base)
ppsym = pp = Symbol("p", base)
qqsym = qq = Symbol("q", base)
rr = Symbol("r", base)
if (string(base) == "norm")
ddsym = :dnorm4
ppsym = :pnorm5
qqsym = :qnorm5
end
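# libRmath exposes the normal d-p-q entry points under the historical C symbols
# `dnorm4`, `pnorm5`, and `qnorm5`, hence the remapping above.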
esc(quote
$dd(x::Number, p1::Number, p2::Number, give_log::Bool) =
ccall(($(string(ddsym)),libRmath), Float64, (Float64,Float64,Float64,Int32), x, p1, p2, give_log)
$dd(x::Number, p1::Number, give_log::Bool) = $dd(x, p1, $d2, give_log)
$dd(x::Number, give_log::Bool) = $dd(x, $d1, $d2, give_log)
$dd(x::Number, p1::Number, p2::Number) = $dd(x, p1, p2, false)
$dd(x::Number, p1::Number) = $dd(x, p1, $d2, false)
$dd(x::Number) = $dd(x, $d1, $d2, false)
$pp(q::Number, p1::Number, p2::Number, lower_tail::Bool, log_p::Bool) =
ccall(($(string(ppsym)),libRmath), Float64, (Float64,Float64,Float64,Int32,Int32), q, p1, p2, lower_tail, log_p)
$pp(q::Number, p1::Number, lower_tail::Bool, log_p::Bool) = $pp(q, p1, $d2, lower_tail, log_p)
$pp(q::Number, lower_tail::Bool, log_p::Bool) = $pp(q, $d1, $d2, lower_tail, log_p)
$pp(q::Number, p1::Number, p2::Number, lower_tail::Bool) = $pp(q, p1, p2, lower_tail, false)
$pp(q::Number, p1::Number, lower_tail::Bool) = $pp(q, p1, $d2, lower_tail, false)
$pp(q::Number, lower_tail::Bool) = $pp(q, $d1, $d2, lower_tail, false)
$pp(q::Number, p1::Number, p2::Number) = $pp(q, p1, p2, true, false)
$pp(q::Number, p1::Number) = $pp(q, p1, $d2, true, false)
$pp(q::Number) = $pp(q, $d1, $d2, true, false)
$qq(p::Number, p1::Number, p2::Number, lower_tail::Bool, log_p::Bool) =
ccall(($(string(qqsym)),libRmath), Float64, (Float64,Float64,Float64,Int32,Int32), p, p1, p2, lower_tail, log_p)
$qq(p::Number, p1::Number, lower_tail::Bool, log_p::Bool) = $qq(p, p1, $d2, lower_tail, log_p)
$qq(p::Number, lower_tail::Bool, log_p::Bool) = $qq(p, $d1, $d2, lower_tail, log_p)
$qq(p::Number, p1::Number, p2::Number, lower_tail::Bool) = $qq(p, p1, p2, lower_tail, false)
$qq(p::Number, p1::Number, lower_tail::Bool) = $qq(p, p1, $d2, lower_tail, false)
$qq(p::Number, lower_tail::Bool) = $qq(p, $d1, $d2, lower_tail, false)
$qq(p::Number, p1::Number, p2::Number) = $qq(p, p1, p2, true, false)
$qq(p::Number, p1::Number) = $qq(p, p1, $d2, true, false)
$qq(p::Number) = $qq(p, $d1, $d2, true, false)
$rr(nn::Integer, p1::Number, p2::Number) =
[ccall(($(string(rr)),libRmath), Float64, (Float64,Float64), p1, p2) for i=1:nn]
$rr(nn::Integer, p1::Number) = $rr(nn, p1, $d2)
$rr(nn::Integer) = $rr(nn, $d1, $d2)
end)
end
@libRmath_2par_2d cauchy 0 1 # Cauchy distribution (location, scale)
@libRmath_2par_2d lnorm 0 1 # Log-normal distribution (meanlog, sdlog)
@libRmath_2par_2d logis 0 1 # Logistic distribution (location, scale)
@libRmath_2par_2d norm 0 1 # Normal (Gaussian) distribution (mu, sd)
@libRmath_2par_2d unif 0 1 # Uniform distribution (min, max)
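## Example: for the Normal distribution above, the macro generates methods whose
## trailing arguments default to (mu = 0, sd = 1), so (values approximate):
##   dnorm(0.0)   == dnorm(0.0, 0, 1, false)          # ≈ 0.3989423
##   pnorm(1.96)  == pnorm(1.96, 0, 1, true, false)   # ≈ 0.9750021
##   qnorm(0.975) == qnorm(0.975, 0, 1, true, false)  # ≈ 1.959964
##   rnorm(10)    # ten draws from Normal(0, 1), i.e. rnorm(10, 0, 1)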
## Distributions with 3 parameters and no defaults
macro libRmath_3par_0d(base)
dd = Symbol("d", base)
pp = Symbol("p", base)
qq = Symbol("q", base)
rr = Symbol("r", base)
esc(quote
$dd(x::Number, p1::Number, p2::Number, p3::Number, give_log::Bool) =
ccall(($(string(dd)),libRmath), Float64, (Float64,Float64,Float64,Float64,Int32), x, p1, p2, p3, give_log)
$dd(x::Number, p1::Number, p2::Number, p3::Number) = $dd(x, p1, p2, p3, false)
$pp(q::Number, p1::Number, p2::Number, p3::Number, lower_tail::Bool, log_p::Bool) =
ccall(($(string(pp)),libRmath), Float64, (Float64,Float64,Float64,Float64,Int32,Int32), q, p1, p2, p3, lower_tail, log_p)
$pp(q::Number, p1::Number, p2::Number, p3::Number, lower_tail::Bool) = $pp(q, p1, p2, p3, lower_tail, false)
$pp(q::Number, p1::Number, p2::Number, p3::Number) = $pp(q, p1, p2, p3, true, false)
$qq(p::Number, p1::Number, p2::Number, p3::Number, lower_tail::Bool, log_p::Bool) =
ccall(($(string(qq)),libRmath), Float64, (Float64,Float64,Float64,Float64,Int32,Int32), p, p1, p2, p3, lower_tail, log_p)
$qq(p::Number, p1::Number, p2::Number, p3::Number, lower_tail::Bool) = $qq(p, p1, p2, p3, lower_tail, false)
$qq(p::Number, p1::Number, p2::Number, p3::Number) = $qq(p, p1, p2, p3, true, false)
$rr(nn::Integer, p1::Number, p2::Number, p3::Number) =
[ccall(($(string(rr)), libRmath), Float64, (Float64, Float64, Float64), p1, p2, p3) for i = 1:nn]
end)
end
@libRmath_3par_0d hyper # Hypergeometric (m, n, k)
@libRmath_3par_0d nbeta # Non-central beta (shape1, shape2, ncp)
@libRmath_3par_0d nf # Non-central F (df1, df2, ncp)
## tukey (Studentized Range Distribution - p and q only - 3pars)
ptukey(q::Number, nmeans::Number, df::Number, nranges::Number=1.0,
lower_tail::Bool=true, log_p::Bool=false) =
ccall((:ptukey, libRmath), Float64,
(Float64, Float64, Float64, Float64, Int32, Int32),
q, nranges, nmeans, df, lower_tail, log_p)
qtukey(p::Number, nmeans::Number, df::Number, nranges::Number=1.0,
       lower_tail::Bool=true, log_p::Bool=false) =
    ccall((:qtukey, libRmath), Float64,
          (Float64, Float64, Float64, Float64, Int32, Int32),
          p, nranges, nmeans, df, lower_tail, log_p)
end #module
| Rmath | https://github.com/JuliaStats/Rmath.jl.git |
|
[
"MIT"
] | 0.8.0 | 852bd0f55565a9e973fcfee83a84413270224dc4 | code | 8552 | import Aqua
using Random, Rmath, Statistics, Test
Random.seed!(124)
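# allEq compares two vectors for approximate equality: exact matches pass
# immediately; otherwise the mean absolute difference, scaled by the mean
# absolute target value when that is non-negligible, must fall below `tolerance`.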
function allEq(target::Vector{Float64}, current::Vector{Float64}, tolerance::Float64)
@test length(target) == length(current)
    if target == current
return true
end
xy = mean(abs.(target .- current))
xn = mean(abs.(target))
if (isfinite(xn) && xn > tolerance)
xy /= xn
end
@test xy < tolerance
return true
end
allEq(target::Vector{Float64}, current::Vector{Float64}) =
allEq(target, current, sqrt(eps()))
# dbeta
@test abs(dbeta(-1, 1, 1) - 0.0) < 10e-8
@test abs(dbeta(0, 1, 1) - 1.0) < 10e-8
@test abs(dbeta(1, 1, 1) - 1.0) < 10e-8
# dbinom
@test abs(dbinom(0, 2, 0.5) - 0.25) < 10e-8
@test abs(dbinom(1, 2, 0.5) - 0.5) < 10e-8
@test abs(dbinom(2, 2, 0.5) - 0.25) < 10e-8
# dcauchy
@test abs(dcauchy(0, 0, 1) - (1 / pi) * (1 / ((0 - 0)^2 + 1^2))) < 10e-8
@test abs(dcauchy(0, 1, 2) - (1 / pi) * (2 / ((0 - 1)^2 + 2^2))) < 10e-8
# define gammaR from Rmath to avoid introducing dependency on SpecialFunctions
# where gamma is located starting with Julia 0.7
gammaR(x::Float64) = ccall((:gammafn, Rmath.libRmath), Float64, (Float64,), x)
# dchisq
@test abs(dchisq(1, 1) - let x = 1; k = 1; (x^((k / 2) - 1) * exp(-(x / 2))) / (2^(k / 2) * gammaR(k / 2)) end) < 10e-8
@test abs(dchisq(2, 3) - let x = 2; k = 3; (x^((k / 2) - 1) * exp(-(x / 2))) / (2^(k / 2) * gammaR(k / 2)) end) < 10e-8
# dexp
@test abs(dexp(1, 2) - (1 / 2) * exp(-(1 / 2) * 1)) < 10e-8
@test abs(dexp(1, 3) - (1 / 3) * exp(-(1 / 3) * 1)) < 10e-8
@test abs(dexp(2, 3) - (1 / 3) * exp(-(1 / 3) * 2)) < 10e-8
const n = 26
const Rbeta = rbeta(n, .8, 2)
const Rbinom = rbinom(n, 55, pi/16)
const Rcauchy = rcauchy(n, 12, 2)
const Rchisq = rchisq(n, 3)
const Rexp = rexp(n, 2)
const Rf = rf(n, 12, 6)
const Rgamma = rgamma(n, 2, 5)
const Rgeom = rgeom(n, pi/16)
const Rhyper = rhyper(n, 40, 30, 20)
const Rlnorm = rlnorm(n, -1, 3)
const Rlogis = rlogis(n, 12, 2)
const Rnbinom = rnbinom(n, 7, .01)
const Rnorm = rnorm(n, -1, 3)
const Rpois = rpois(n, 12)
const Rsignrank = rsignrank(n, 47)
const Rt = rt(n, 11)
## Rt2 below (to preserve the following random numbers!)
const Runif = runif(n, .2, 2)
const Rweibull = rweibull(n, 3, 2)
const Rwilcox = rwilcox(n, 13, 17)
const Rt2 = rt(n, 1.01)
const Pbeta = pbeta.(Rbeta, .8, 2)
const Pbinom = pbinom.(Rbinom, 55, pi/16)
const Pcauchy = pcauchy.(Rcauchy, 12, 2)
const Pchisq = pchisq.(Rchisq, 3)
const Pexp = pexp.(Rexp, 2)
const Pf = pf.(Rf, 12, 6)
const Pgamma = pgamma.(Rgamma, 2, 5)
const Pgeom = pgeom.(Rgeom, pi/16)
const Phyper = phyper.(Rhyper, 40, 30, 20)
const Plnorm = plnorm.(Rlnorm, -1, 3)
const Plogis = plogis.(Rlogis, 12, 2)
const Pnbinom = pnbinom.(Rnbinom, 7, .01)
const Pnorm = pnorm.(Rnorm, -1, 3)
const Ppois = ppois.(Rpois, 12)
const Psignrank = psignrank.(Rsignrank, 47)
const Pt = pt.(Rt, 11)
const Pt2 = pt.(Rt2, 1.01)
const Punif = punif.(Runif, .2, 2)
const Pweibull = pweibull.(Rweibull, 3, 2)
const Pwilcox = pwilcox.(Rwilcox, 13, 17)
## Check q*(p*(.)) = identity
allEq(Rbeta, qbeta.(Pbeta, .8, 2))
allEq(Rbinom, qbinom.(Pbinom, 55, pi/16))
allEq(Rcauchy, qcauchy.(Pcauchy, 12, 2))
allEq(Rchisq, qchisq.(Pchisq, 3))
allEq(Rexp, qexp.(Pexp, 2))
allEq(Rf, qf.(Pf, 12, 6))
allEq(Rgamma, qgamma.(Pgamma, 2, 5))
allEq(Rgeom, qgeom.(Pgeom, pi/16))
allEq(Rhyper, qhyper.(Phyper, 40, 30, 20))
allEq(Rlnorm, qlnorm.(Plnorm, -1, 3))
allEq(Rlogis, qlogis.(Plogis, 12, 2))
allEq(Rnbinom, qnbinom.(Pnbinom, 7, .01))
allEq(Rnorm, qnorm.(Pnorm, -1, 3))
allEq(Rpois, qpois.(Ppois, 12))
allEq(Rsignrank, qsignrank.(Psignrank, 47))
allEq(Rt, qt.(Pt, 11))
allEq(Rt2, qt.(Pt2, 1.01), 1e-2)
allEq(Runif, qunif.(Punif, .2, 2))
allEq(Rweibull, qweibull.(Pweibull, 3, 2))
allEq(Rwilcox, qwilcox.(Pwilcox, 13, 17))
## Same with "upper tail":
allEq(Rbeta, qbeta.(1 .- Pbeta, .8, 2, false))
allEq(Rbinom, qbinom.(1 .- Pbinom, 55, pi/16, false))
allEq(Rcauchy, qcauchy.(1 .- Pcauchy, 12, 2, false))
allEq(Rchisq, qchisq.(1 .- Pchisq, 3, false))
allEq(Rexp, qexp.(1 .- Pexp, 2, false))
allEq(Rf, qf.(1 .- Pf, 12, 6, false))
allEq(Rgamma, qgamma.(1 .- Pgamma, 2, 5, false))
allEq(Rgeom, qgeom.(1 .- Pgeom, pi/16, false))
allEq(Rhyper, qhyper.(1 .- Phyper, 40, 30, 20, false))
allEq(Rlnorm, qlnorm.(1 .- Plnorm, -1, 3, false))
allEq(Rlogis, qlogis.(1 .- Plogis, 12, 2, false))
allEq(Rnbinom, qnbinom.(1 .- Pnbinom, 7, .01, false))
allEq(Rnorm, qnorm.(1 .- Pnorm, -1, 3,false))
allEq(Rpois, qpois.(1 .- Ppois, 12, false))
allEq(Rsignrank, qsignrank.(1 .- Psignrank, 47, false))
allEq(Rt, qt.(1 .- Pt, 11, false))
allEq(Rt2, qt.(1 .- Pt2, 1.01, false), 1e-2)
allEq(Runif, qunif.(1 .- Punif, .2, 2, false))
allEq(Rweibull, qweibull.(1 .- Pweibull, 3, 2, false))
allEq(Rwilcox, qwilcox.(1 .- Pwilcox, 13, 17, false))
const logPbinom = pbinom.(Rbinom, 55, pi/16, true, true)
const logPnbinom = pnbinom.(Rnbinom, 7, .01, true, true)
const logPpois = ppois.(Rpois, 12, true, true)
const logcPbinom = pbinom.(Rbinom, 55, pi/16, false, true)
const logcPnbinom = pnbinom.(Rnbinom, 7, .01, false, true)
const logcPpois = ppois.(Rpois, 12, false, true)
## Check q*(p* ( log ), log) = identity
allEq(Rbeta, qbeta.(log.(Pbeta), .8, 2, true, true))
allEq(Rbinom, qbinom.(logPbinom, 55, pi/16, true, true))
allEq(Rcauchy, qcauchy.(log.(Pcauchy), 12, 2, true, true))
allEq(Rchisq, qchisq.(log.(Pchisq), 3, true, true), 1e-14)
allEq(Rexp, qexp.(log.(Pexp), 2, true, true))
allEq(Rf, qf.(log.(Pf), 12, 6, true, true))
allEq(Rgamma, qgamma.(log.(Pgamma), 2, 5, true, true))
allEq(Rgeom, qgeom.(log.(Pgeom), pi/16, true, true))
allEq(Rhyper, qhyper.(log.(Phyper), 40, 30, 20, true, true))
allEq(Rlnorm, qlnorm.(log.(Plnorm), -1, 3, true, true))
allEq(Rlogis, qlogis.(log.(Plogis), 12, 2, true, true))
allEq(Rnbinom, qnbinom.(logPnbinom, 7, .01, true, true))
allEq(Rnorm, qnorm.(log.(Pnorm), -1, 3, true, true))
allEq(Rpois, qpois.(logPpois, 12, true, true))
allEq(Rsignrank, qsignrank.(log.(Psignrank), 47, true, true))
allEq(Rt, qt.(log.(Pt), 11, true, true))
allEq(Rt2, qt.(log.(Pt2), 1.01, true, true), 1e-2)
allEq(Runif, qunif.(log.(Punif), .2, 2, true, true))
allEq(Rweibull, qweibull.(log.(Pweibull), 3, 2, true, true))
allEq(Rwilcox, qwilcox.(log.(Pwilcox), 13, 17, true, true))
## same q*(p* (log) log) with upper tail:
allEq(Rbeta, qbeta.(log.(1 .- Pbeta), .8, 2, false, true))
allEq(Rbinom, qbinom.(logcPbinom, 55, pi/16, false, true))
allEq(Rcauchy, qcauchy.(log.(1 .- Pcauchy), 12, 2, false, true))
allEq(Rchisq, qchisq.(log.(1 .- Pchisq), 3, false, true))
allEq(Rexp, qexp.(log.(1 .- Pexp), 2, false, true))
allEq(Rf, qf.(log.(1 .- Pf), 12, 6, false, true))
allEq(Rgamma, qgamma.(log.(1 .- Pgamma), 2, 5, false, true))
allEq(Rgeom, qgeom.(log.(1 .- Pgeom), pi/16, false, true))
allEq(Rhyper, qhyper.(log.(1 .- Phyper), 40, 30, 20, false, true))
allEq(Rlnorm, qlnorm.(log.(1 .- Plnorm), -1, 3, false, true))
allEq(Rlogis, qlogis.(log.(1 .- Plogis), 12, 2, false, true))
allEq(Rnbinom, qnbinom.(logcPnbinom, 7, .01, false, true))
allEq(Rnorm, qnorm.(log.(1 .- Pnorm), -1, 3, false, true))
allEq(Rpois, qpois.(logcPpois, 12, false, true))
allEq(Rsignrank, qsignrank.(log.(1 .- Psignrank), 47, false, true))
allEq(Rt, qt.(log.(1 .- Pt ), 11, false, true))
allEq(Rt2, qt.(log.(1 .- Pt2), 1.01, false, true), 1e-2)
allEq(Runif, qunif.(log.(1 .- Punif), .2, 2, false, true))
allEq(Rweibull, qweibull.(log.(1 .- Pweibull), 3, 2, false, true))
allEq(Rwilcox, qwilcox.(log.(1 .- Pwilcox), 13, 17, false, true))
## Test if seed! working correctly
Random.seed!(124)
allEq(Rbeta, rbeta(n, .8, 2))
allEq(Rbinom, rbinom(n, 55, pi/16))
allEq(Rcauchy, rcauchy(n, 12, 2))
allEq(Rchisq, rchisq(n, 3))
allEq(Rexp, rexp(n, 2))
allEq(Rf, rf(n, 12, 6))
allEq(Rgamma, rgamma(n, 2, 5))
allEq(Rgeom, rgeom(n, pi/16))
allEq(Rhyper, rhyper(n, 40, 30, 20))
allEq(Rlnorm, rlnorm(n, -1, 3))
allEq(Rlogis, rlogis(n, 12, 2))
allEq(Rnbinom, rnbinom(n, 7, .01))
allEq(Rnorm, rnorm(n, -1, 3))
allEq(Rpois, rpois(n, 12))
allEq(Rsignrank, rsignrank(n, 47))
# Aqua tests
@testset "Aqua.jl" begin
Aqua.test_all(Rmath)
end
| Rmath | https://github.com/JuliaStats/Rmath.jl.git |
|
[
"MIT"
] | 0.8.0 | 852bd0f55565a9e973fcfee83a84413270224dc4 | docs | 289 | Rmath.jl
========
Archive of functions that emulate R's d-p-q-r functions for probability distributions.
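For example, the Normal distribution is exposed as `dnorm`/`pnorm`/`qnorm`/`rnorm`, with default parameters `mu = 0` and `sd = 1` (values shown are approximate):
```julia
using Rmath
dnorm(0.0)    # density of Normal(0, 1) at 0 ≈ 0.3989
pnorm(1.96)   # cumulative probability ≈ 0.9750
qnorm(0.975)  # quantile ≈ 1.96
rnorm(5)      # five random draws
```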
Note that
libRmath-julia is licensed under the GPLv2; see https://github.com/JuliaLang/Rmath-julia/blob/master/COPYING.
The Julia bindings here are licensed under [MIT](LICENSE.md).
| Rmath | https://github.com/JuliaStats/Rmath.jl.git |
|
[
"MIT"
] | 1.0.0 | f3e5f4279d77b74bf6aef2b53562f771cc5a0474 | code | 247 | using Documenter, ExcelFiles
makedocs(
modules = [ExcelFiles],
sitename = "ExcelFiles.jl",
analytics="UA-132838790-1",
pages = [
"Introduction" => "index.md"
]
)
deploydocs(
repo = "github.com/queryverse/ExcelFiles.jl.git"
)
| ExcelFiles | https://github.com/queryverse/ExcelFiles.jl.git |
|
[
"MIT"
] | 1.0.0 | f3e5f4279d77b74bf6aef2b53562f771cc5a0474 | code | 4559 | module ExcelFiles
using ExcelReaders, XLSX, IteratorInterfaceExtensions, TableTraits, DataValues
using TableTraitsUtils, FileIO, TableShowUtils, Dates, Printf
import IterableTables
export load, save, File, @format_str
struct ExcelFile
filename::String
range::String
keywords
end
function Base.show(io::IO, source::ExcelFile)
TableShowUtils.printtable(io, getiterator(source), "Excel file")
end
function Base.show(io::IO, ::MIME"text/html", source::ExcelFile)
TableShowUtils.printHTMLtable(io, getiterator(source))
end
Base.Multimedia.showable(::MIME"text/html", source::ExcelFile) = true
function Base.show(io::IO, ::MIME"application/vnd.dataresource+json", source::ExcelFile)
TableShowUtils.printdataresource(io, getiterator(source))
end
Base.Multimedia.showable(::MIME"application/vnd.dataresource+json", source::ExcelFile) = true
function fileio_load(f::FileIO.File{FileIO.format"Excel"}, range; keywords...)
return ExcelFile(f.filename, range, keywords)
end
function fileio_save(f::FileIO.File{FileIO.format"Excel"}, data; sheetname::AbstractString="")
cols, colnames = TableTraitsUtils.create_columns_from_iterabletable(data, na_representation=:missing)
return XLSX.writetable(f.filename, cols, colnames; sheetname=sheetname)
end
IteratorInterfaceExtensions.isiterable(x::ExcelFile) = true
TableTraits.isiterabletable(x::ExcelFile) = true
function gennames(n::Integer)
res = Vector{Symbol}(undef, n)
for i in 1:n
res[i] = Symbol(@sprintf "x%d" i)
end
return res
end
function _readxl(file::ExcelReaders.ExcelFile, sheetname::AbstractString, startrow::Integer, startcol::Integer, endrow::Integer, endcol::Integer; header::Bool=true, colnames::Vector{Symbol}=Symbol[])
data = ExcelReaders.readxl_internal(file, sheetname, startrow, startcol, endrow, endcol)
nrow, ncol = size(data)
if length(colnames)==0
if header
headervec = data[1, :]
NAcol = map(i->isa(i, DataValues.DataValue) && DataValues.isna(i), headervec)
headervec[NAcol] = gennames(count(!iszero, NAcol))
# This somewhat complicated conditional makes sure that column names
# that are integer numbers end up without an extra ".0" as their name
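# (e.g. a header cell read as the float 2015.0 becomes Symbol("2015") rather than Symbol("2015.0"))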
colnames = [isa(i, AbstractFloat) ? ( modf(i)[1]==0.0 ? Symbol(Int(i)) : Symbol(string(i)) ) : Symbol(i) for i in vec(headervec)]
else
colnames = gennames(ncol)
end
elseif length(colnames)!=ncol
error("Length of colnames must equal number of columns in selected range")
end
columns = Array{Any}(undef, ncol)
for i=1:ncol
if header
vals = data[2:end,i]
else
vals = data[:,i]
end
# Check whether all non-NA values in this column
# are of the same type
type_of_el = length(vals)>0 ? typeof(vals[1]) : Any
for val=vals
type_of_el = promote_type(type_of_el, typeof(val))
end
if type_of_el <: DataValue
columns[i] = convert(DataValueArray{eltype(type_of_el)}, vals)
# TODO Check whether this hack is correct
for (j,v) in enumerate(columns[i])
if v isa DataValue && !DataValues.isna(v) && v[] isa DataValue
columns[i][j] = v[]
end
end
else
columns[i] = convert(Array{type_of_el}, vals)
end
end
return columns, colnames
end
function IteratorInterfaceExtensions.getiterator(file::ExcelFile)
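# A range containing "!" (e.g. "Sheet1!A1:B2") is treated as a full range
# specification; otherwise it is treated as the name of a sheet.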
column_data, col_names = if occursin("!", file.range)
excelfile = openxl(file.filename)
sheetname, startrow, startcol, endrow, endcol = ExcelReaders.convert_ref_to_sheet_row_col(file.range)
_readxl(excelfile, sheetname, startrow, startcol, endrow, endcol; file.keywords...)
else
excelfile = openxl(file.filename)
sheet = excelfile.workbook.sheet_by_name(file.range)
keywords = filter(i->!(i[1] in (:header, :colnames)), file.keywords)
startrow, startcol, endrow, endcol = ExcelReaders.convert_args_to_row_col(sheet; keywords...)
keywords2 = filter(i->!(i[1] in (:skipstartrows, :skipstartcols, :nrows, :ncols)), file.keywords)
_readxl(excelfile, file.range, startrow, startcol, endrow, endcol; keywords2...)
end
return create_tableiterator(column_data, col_names)
end
function Base.collect(file::ExcelFile)
return collect(getiterator(file))
end
end # module
| ExcelFiles | https://github.com/queryverse/ExcelFiles.jl.git |
|
[
"MIT"
] | 1.0.0 | f3e5f4279d77b74bf6aef2b53562f771cc5a0474 | code | 10178 | using ExcelFiles
using ExcelReaders
using IteratorInterfaceExtensions
using TableTraits
using TableTraitsUtils
using Dates
using DataValues
using DataFrames
using Test
@testset "ExcelFiles" begin
filename = normpath(dirname(pathof(ExcelReaders)), "..", "test", "TestData.xlsx")
efile = load(filename, "Sheet1")
@test sprint((stream,data)->show(stream, "text/html", data), efile) == "<table><thead><tr><th>Some Float64s</th><th>Some Strings</th><th>Some Bools</th><th>Mixed column</th><th>Mixed with NA</th><th>Float64 with NA</th><th>String with NA</th><th>Bool with NA</th><th>Some dates</th><th>Dates with NA</th><th>Some errors</th><th>Errors with NA</th><th>Column with NULL and then mixed</th></tr></thead><tbody><tr><td>1.0</td><td>"A"</td><td>true</td><td>2.0</td><td>9.0</td><td>3.0</td><td>"FF"</td><td>#NA</td><td>2015-03-03T00:00:00</td><td>1965-04-03T00:00:00</td><td>#DIV/0!</td><td>#DIV/0!</td><td>#NA</td></tr><tr><td>1.5</td><td>"BB"</td><td>false</td><td>"EEEEE"</td><td>"III"</td><td>#NA</td><td>#NA</td><td>true</td><td>2015-02-04T10:14:00</td><td>1950-08-09T18:40:00</td><td>#N/A</td><td>#N/A</td><td>3.4</td></tr><tr><td>2.0</td><td>"CCC"</td><td>false</td><td>false</td><td>#NA</td><td>3.5</td><td>"GGG"</td><td>#NA</td><td>1988-04-09T00:00:00</td><td>19:00:00</td><td>#REF!</td><td>#NAME?</td><td>"HKEJW"</td></tr><tr><td>2.5</td><td>"DDDD"</td><td>true</td><td>1.5</td><td>true</td><td>4.0</td><td>"HHHH"</td><td>false</td><td>15:02:00</td><td>#NA</td><td>#NAME?</td><td>#NA</td><td>#NA</td></tr></tbody></table>"
@test sprint((stream,data)->show(stream, "application/vnd.dataresource+json", data), efile) == "{\"schema\":{\"fields\":[{\"name\":\"Some Float64s\",\"type\":\"number\"},{\"name\":\"Some Strings\",\"type\":\"string\"},{\"name\":\"Some Bools\",\"type\":\"boolean\"},{\"name\":\"Mixed column\",\"type\":\"string\"},{\"name\":\"Mixed with NA\",\"type\":\"string\"},{\"name\":\"Float64 with NA\",\"type\":\"number\"},{\"name\":\"String with NA\",\"type\":\"string\"},{\"name\":\"Bool with NA\",\"type\":\"boolean\"},{\"name\":\"Some dates\",\"type\":\"string\"},{\"name\":\"Dates with NA\",\"type\":\"string\"},{\"name\":\"Some errors\",\"type\":\"string\"},{\"name\":\"Errors with NA\",\"type\":\"string\"},{\"name\":\"Column with NULL and then mixed\",\"type\":\"string\"}]},\"data\":[{\"Some Float64s\":1.0,\"Some Strings\":\"A\",\"Some Bools\":true,\"Mixed column\":2.0,\"Mixed with NA\":9.0,\"Float64 with NA\":3.0,\"String with NA\":\"FF\",\"Bool with NA\":null,\"Some dates\":\"2015-03-03T00:00:00\",\"Dates with NA\":\"1965-04-03T00:00:00\",\"Some errors\":{\"errorcode\":7},\"Errors with NA\":{\"errorcode\":7},\"Column with NULL and then mixed\":null},{\"Some Float64s\":1.5,\"Some Strings\":\"BB\",\"Some Bools\":false,\"Mixed column\":\"EEEEE\",\"Mixed with NA\":\"III\",\"Float64 with NA\":null,\"String with NA\":null,\"Bool with NA\":true,\"Some dates\":\"2015-02-04T10:14:00\",\"Dates with NA\":\"1950-08-09T18:40:00\",\"Some errors\":{\"errorcode\":42},\"Errors with NA\":{\"errorcode\":42},\"Column with NULL and then mixed\":3.4},{\"Some Float64s\":2.0,\"Some Strings\":\"CCC\",\"Some Bools\":false,\"Mixed column\":false,\"Mixed with NA\":null,\"Float64 with NA\":3.5,\"String with NA\":\"GGG\",\"Bool with NA\":null,\"Some dates\":\"1988-04-09T00:00:00\",\"Dates with NA\":\"19:00:00\",\"Some errors\":{\"errorcode\":23},\"Errors with NA\":{\"errorcode\":29},\"Column with NULL and then mixed\":\"HKEJW\"},{\"Some Float64s\":2.5,\"Some Strings\":\"DDDD\",\"Some Bools\":true,\"Mixed column\":1.5,\"Mixed with NA\":true,\"Float64 with NA\":4.0,\"String with NA\":\"HHHH\",\"Bool with NA\":false,\"Some dates\":\"15:02:00\",\"Dates with NA\":null,\"Some errors\":{\"errorcode\":29},\"Errors with NA\":null,\"Column with NULL and then mixed\":null}]}"
@test sprint(show, efile) == "4x13 Excel file\nSome Float64s │ Some Strings │ Some Bools │ Mixed column │ Mixed with NA\n──────────────┼──────────────┼────────────┼──────────────┼──────────────\n1.0 │ A │ true │ 2.0 │ 9.0 \n1.5 │ BB │ false │ \"EEEEE\" │ \"III\" \n2.0 │ CCC │ false │ false │ #NA \n2.5 │ DDDD │ true │ 1.5 │ true \n... with 8 more columns: Float64 with NA, String with NA, Bool with NA, Some dates, Dates with NA, Some errors, Errors with NA, Column with NULL and then mixed"
@test TableTraits.isiterabletable(efile) == true
@test IteratorInterfaceExtensions.isiterable(efile) == true
@test showable("text/html", efile) == true
@test showable("application/vnd.dataresource+json", efile) == true
@test isiterable(efile) == true
full_dfs = [create_columns_from_iterabletable(load(filename, "Sheet1!C3:O7")), create_columns_from_iterabletable(load(filename, "Sheet1"))]
for (df, names) in full_dfs
@test length(df) == 13
@test length(df[1]) == 4
@test df[1] == [1., 1.5, 2., 2.5]
@test df[2] == ["A", "BB", "CCC", "DDDD"]
@test df[3] == [true, false, false, true]
@test df[4] == [2, "EEEEE", false, 1.5]
@test df[5] == [9., "III", NA, true]
@test df[6] == [3., NA, 3.5, 4]
@test df[7] == ["FF", NA, "GGG", "HHHH"]
@test df[8] == [NA, true, NA, false]
@test df[9] == [Date(2015,3,3), DateTime(2015,2,4,10,14), Date(1988,4,9), Dates.Time(15,2,0)]
@test df[10] == [Date(1965,4,3), DateTime(1950,8,9,18,40), Dates.Time(19,0,0), NA]
@test eltype(df[11]) == ExcelReaders.ExcelErrorCell
@test df[12][1][] isa ExcelReaders.ExcelErrorCell
@test df[12][2][] isa ExcelReaders.ExcelErrorCell
@test df[12][3][] isa ExcelReaders.ExcelErrorCell
@test df[12][4] == NA
@test df[13] == [NA, 3.4, "HKEJW", NA]
end
df, names = create_columns_from_iterabletable(load(filename, "Sheet1!C4:O7", header=false))
@test names == [:x1,:x2,:x3,:x4,:x5,:x6,:x7,:x8,:x9,:x10,:x11,:x12,:x13]
@test length(df[1]) == 4
@test length(df) == 13
@test df[1] == [1., 1.5, 2., 2.5]
@test df[2] == ["A", "BB", "CCC", "DDDD"]
@test df[3] == [true, false, false, true]
@test df[4] == [2, "EEEEE", false, 1.5]
@test df[5] == [9., "III", NA, true]
@test df[6] == [3, NA, 3.5, 4]
@test df[7] == ["FF", NA, "GGG", "HHHH"]
@test df[8] == [NA, true, NA, false]
@test df[9] == [Date(2015, 3, 3), DateTime(2015, 2, 4, 10, 14), DateTime(1988, 4, 9), Dates.Time(15,2,0)]
@test df[10] == [Date(1965, 4, 3), DateTime(1950, 8, 9, 18, 40), Dates.Time(19,0,0), NA]
@test isa(df[11][1], ExcelReaders.ExcelErrorCell)
@test isa(df[11][2], ExcelReaders.ExcelErrorCell)
@test isa(df[11][3], ExcelReaders.ExcelErrorCell)
@test isa(df[11][4], ExcelReaders.ExcelErrorCell)
@test isa(df[12][1][], ExcelReaders.ExcelErrorCell)
@test isa(df[12][2][], ExcelReaders.ExcelErrorCell)
@test isa(df[12][3][], ExcelReaders.ExcelErrorCell)
@test DataValues.isna(df[12][4])
@test df[13] == [NA, 3.4, "HKEJW", NA]
good_colnames = [:c1, :c2, :c3, :c4, :c5, :c6, :c7, :c8, :c9, :c10, :c11, :c12, :c13]
df, names = create_columns_from_iterabletable(load(filename, "Sheet1!C4:O7", header=false, colnames=good_colnames))
@test names == good_colnames
@test length(df[1]) == 4
@test length(df) == 13
@test df[1] == [1., 1.5, 2., 2.5]
@test df[2] == ["A", "BB", "CCC", "DDDD"]
@test df[3] == [true, false, false, true]
@test df[4] == [2, "EEEEE", false, 1.5]
@test df[5] == [9., "III", NA, true]
@test df[6] == [3, NA, 3.5, 4]
@test df[7] == ["FF", NA, "GGG", "HHHH"]
@test df[8] == [NA, true, NA, false]
@test df[9] == [Date(2015, 3, 3), DateTime(2015, 2, 4, 10, 14), DateTime(1988, 4, 9), Dates.Time(15,2,0)]
@test df[10] == [Date(1965, 4, 3), DateTime(1950, 8, 9, 18, 40), Dates.Time(19,0,0), NA]
@test isa(df[11][1], ExcelReaders.ExcelErrorCell)
@test isa(df[11][2], ExcelReaders.ExcelErrorCell)
@test isa(df[11][3], ExcelReaders.ExcelErrorCell)
@test isa(df[11][4], ExcelReaders.ExcelErrorCell)
@test isa(df[12][1][], ExcelReaders.ExcelErrorCell)
@test isa(df[12][2][], ExcelReaders.ExcelErrorCell)
@test isa(df[12][3][], ExcelReaders.ExcelErrorCell)
@test DataValues.isna(df[12][4])
@test df[13] == [NA, 3.4, "HKEJW", NA]
# Test for saving DataFrame to XLSX
input = (Day=["Nov. 27","Nov. 28","Nov. 29"], Highest=[78,79,75]) |> DataFrame
file = save("file.xlsx", input)
output = load("file.xlsx", "Sheet1") |> DataFrame
@test input == output
rm("file.xlsx")
# Test for saving DataFrame to XLSX with sheetname keyword
input = (Day=["Nov. 27","Nov. 28","Nov. 29"], Highest=[78,79,75]) |> DataFrame
file = save("file.xlsx", input, sheetname="SheetName")
output = load("file.xlsx", "SheetName") |> DataFrame
@test input == output
rm("file.xlsx")
df, names = create_columns_from_iterabletable(load(filename, "Sheet1", colnames=good_colnames))
@test names == good_colnames
@test length(df[1]) == 4
@test length(df) == 13
@test df[1] == [1., 1.5, 2., 2.5]
@test df[2] == ["A", "BB", "CCC", "DDDD"]
@test df[3] == [true, false, false, true]
@test df[4] == [2, "EEEEE", false, 1.5]
@test df[5] == [9., "III", NA, true]
@test df[6] == [3, NA, 3.5, 4]
@test df[7] == ["FF", NA, "GGG", "HHHH"]
@test df[8] == [NA, true, NA, false]
@test df[9] == [Date(2015, 3, 3), DateTime(2015, 2, 4, 10, 14), DateTime(1988, 4, 9), Dates.Time(15,2,0)]
@test df[10] == [Date(1965, 4, 3), DateTime(1950, 8, 9, 18, 40), Dates.Time(19,0,0), NA]
@test isa(df[11][1], ExcelReaders.ExcelErrorCell)
@test isa(df[11][2], ExcelReaders.ExcelErrorCell)
@test isa(df[11][3], ExcelReaders.ExcelErrorCell)
@test isa(df[11][4], ExcelReaders.ExcelErrorCell)
@test isa(df[12][1][], ExcelReaders.ExcelErrorCell)
@test isa(df[12][2][], ExcelReaders.ExcelErrorCell)
@test isa(df[12][3][], ExcelReaders.ExcelErrorCell)
@test DataValues.isna(df[12][4])
@test df[13] == [NA, 3.4, "HKEJW", NA]
# Too few colnames
@test_throws ErrorException create_columns_from_iterabletable(load(filename, "Sheet1!C4:O7", header=true, colnames=[:c1, :c2, :c3, :c4]))
# Test for constructing DataFrame with empty header cell
data, names = create_columns_from_iterabletable(load(filename, "Sheet2!C5:E7"))
@test names == [:Col1, :x1, :Col3]
end
| ExcelFiles | https://github.com/queryverse/ExcelFiles.jl.git |
|
[
"MIT"
] | 1.0.0 | f3e5f4279d77b74bf6aef2b53562f771cc5a0474 | docs | 943 | # ExcelFiles.jl v1.0.0
* Drop julia 0.7 support
* Migrate to Project.toml
* Fix column type detection
# ExcelFiles.jl v0.9.1
* Update to latest PyCall syntax
# ExcelFiles.jl v0.9.0
* Add support for "application/vnd.dataresource+json" MIME type
# ExcelFiles.jl v0.8.0
* Export FileIO.File and FileIO.@format_str
# ExcelFiles.jl v0.7.0
* Support writing of xlsx files
# ExcelFiles.jl v0.6.1
* Work around bug in pkg registry conversion script
# ExcelFiles.jl v0.6.0
* Drop julia 0.6 support, add julia 0.7 support
# ExcelFiles.jl v0.5.0
* Add show method
# ExcelFiles.jl v0.4.0
* Export load and save
# ExcelFiles.jl v0.3.1
* Fix bug related to skipstartrows etc.
# ExcelFiles.jl v0.3.0
* Incorporate all table functionality from ExcelReaders.jl.
* Drop dependency on DataTables.jl and DataFrames.jl.
# ExcelFiles.jl v0.2.0
* Move to TableTraits.jl
# ExcelFiles.jl v0.1.0
* Bug fix release
# ExcelFiles.jl v0.0.1
* Initial release
| ExcelFiles | https://github.com/queryverse/ExcelFiles.jl.git |
|
[
"MIT"
] | 1.0.0 | f3e5f4279d77b74bf6aef2b53562f771cc5a0474 | docs | 3526 | # ExcelFiles
[](http://www.repostatus.org/#active)
[](https://travis-ci.org/queryverse/ExcelFiles.jl)
[](https://ci.appveyor.com/project/queryverse/excelfiles-jl/branch/master)
[](http://codecov.io/github/queryverse/ExcelFiles.jl?branch=master)
## Overview
This package provides load and save support for Excel files under the
[FileIO.jl](https://github.com/JuliaIO/FileIO.jl) package.
## Installation
Use ``Pkg.add("ExcelFiles")`` in Julia to install ExcelFiles and its dependencies.
## Usage
### Load an Excel file
To read an Excel file into a ``DataFrame``, use the following Julia code:
````julia
using ExcelFiles, DataFrames
df = DataFrame(load("data.xlsx", "Sheet1"))
````
The call to ``load`` returns a ``struct`` that is an [IterableTables.jl](https://github.com/queryverse/IterableTables.jl) source, so it can be passed to any function that can handle iterable tables, i.e. all the sinks in [IterableTables.jl](https://github.com/queryverse/IterableTables.jl). Here are some examples of materializing an Excel file into data structures that are not a ``DataFrame``:
````julia
using ExcelFiles, DataTables, IndexedTables, TimeSeries, Temporal, Gadfly
# Load into a DataTable
dt = DataTable(load("data.xlsx", "Sheet1"))
# Load into an IndexedTable
it = IndexedTable(load("data.xlsx", "Sheet1"))
# Load into a TimeArray
ta = TimeArray(load("data.xlsx", "Sheet1"))
# Load into a TS
ts = TS(load("data.xlsx", "Sheet1"))
# Plot directly with Gadfly
plot(load("data.xlsx", "Sheet1"), x=:a, y=:b, Geom.line)
````
The ``load`` function also takes a number of parameters:
````julia
function load(f::FileIO.File{FileIO.format"Excel"}, range; keywords...)
````
#### Arguments:
* ``range``: either the name of the sheet in the Excel file to read, or a full Excel range specification (e.g. "Sheetname!A1:B2"); see the example below.
* The ``keywords`` arguments are the same as in [ExcelReaders.jl](https://github.com/queryverse/ExcelReaders.jl) (which is used under the hood to read Excel files). When ``range`` is a sheet name, the keyword arguments for the ``readxlsheet`` function from ExcelReaders.jl apply, if ``range`` is a range specification, the keyword arguments for the ``readxl`` function apply.
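For example, to load a fixed cell range rather than a whole sheet (the file name and range here are illustrative):
````julia
using ExcelFiles, DataFrames
df = DataFrame(load("data.xlsx", "Sheet1!A1:B10"))
````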
### Save an Excel file
The following code saves any iterable table as an Excel file:
````julia
using ExcelFiles
save("output.xlsx", it)
````
This will work as long as ``it`` is any of the types supported as sources in IterableTables.jl.
### Using the pipe syntax
``load`` also supports the pipe syntax. For example, to load an Excel file into a ``DataFrame``, one can use the following code:
````julia
using ExcelFiles, DataFrames
df = load("data.xlsx", "Sheet1") |> DataFrame
````
To save an iterable table, one can use the following form:
````julia
using ExcelFiles, DataFrames
df = # Acquire a DataFrame somehow
df |> save("output.xlsx")
````
The pipe syntax is especially useful when combining it with [Query.jl](https://github.com/queryverse/Query.jl) queries, for example one can easily load an Excel file, pipe it into a query, then pipe it to the ``save`` function to store the results in a new file.
| ExcelFiles | https://github.com/queryverse/ExcelFiles.jl.git |
|
[
"MIT"
] | 1.2.1 | b9a0be13edfd6b831cada22f5ac9a05c0ede6018 | code | 521 | using Documenter, KeyedFrames
makedocs(;
modules=[KeyedFrames],
format=Documenter.HTML(prettyurls=(get(ENV, "CI", nothing)=="true")),
pages=[
"Home" => "index.md",
],
repo="https://github.com/invenia/KeyedFrames.jl/blob/{commit}{path}#L{line}",
sitename="KeyedFrames.jl",
authors="Invenia Technical Computing Corporation",
assets=[
"assets/invenia.css",
"assets/logo.png",
],
)
deploydocs(;
repo="github.com/invenia/KeyedFrames.jl",
target="build",
)
| KeyedFrames | https://github.com/invenia/KeyedFrames.jl.git |
|
[
"MIT"
] | 1.2.1 | b9a0be13edfd6b831cada22f5ac9a05c0ede6018 | code | 11992 | module KeyedFrames
import Base: @deprecate
import DataFrames: deletecols!, deleterows!
using DataFrames
using DataFrames: DataFrameRow, SubDataFrame
using DataFrames: delete!, first, index, last, ncol, nonunique, nrow, permutecols!,
rename, rename!, select, select!, unique!
struct KeyedFrame <: AbstractDataFrame
frame::DataFrame
key::Vector{Symbol}
function KeyedFrame(df::DataFrame, key::Vector{Symbol})
key = unique(key)
df_names = propertynames(df)
if !issubset(key, df_names)
throw(
ArgumentError(
string(
"The columns provided for the key ($key) must all be ",
"present in the DataFrame ($df_names)."
)
)
)
end
return new(df, key)
end
end
function KeyedFrame(df::DataFrame, key::Vector{<:AbstractString})
return KeyedFrame(df, map(Symbol, key))
end
KeyedFrame(df::DataFrame, key::Symbol) = KeyedFrame(df, [key])
"""
KeyedFrame(df::DataFrame, key::Vector{Symbol})
Create a `KeyedFrame` using the provided `DataFrame`; `key` specifies the columns
to use by default when performing a `join` on `KeyedFrame`s when `on` is not provided.
When performing a `join`, if only one of the arguments is a `KeyedFrame` and `on` is not
specified, the frames will be joined on the `key` of the `KeyedFrame`. If both
arguments are `KeyedFrame`s, `on` will default to the intersection of their respective
`key`s. In all cases, the result of the `join` will share a type with the first argument.
When calling `unique` (or `unique!`) on a KeyedFrame without providing a `cols` argument,
`cols` will default to the `key` of the `KeyedFrame` instead of all columns. If you wish to
remove only rows that are duplicates across all columns (rather than just across the key),
you can call `unique!(kf, names(kf))`.
When `sort`ing, if no `cols` keyword is supplied, the `key` is used to determine precedence.
When testing for equality, `key` ordering is ignored, which means that it's possible to have
two `KeyedFrame`s that are considered equal but whose default sort order will be different
by virtue of having the columns listed in a different order in their `key`s.
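# Example
```julia
kf = KeyedFrame(DataFrame(; a=[2, 1, 2], b=[1, 2, 1], c=1:3), [:a, :b])
sort(kf)    # sorted by :a, then :b
unique(kf)  # deduplicated on (:a, :b); the third row duplicates the first and is dropped
```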
"""
KeyedFrame
DataFrames.DataFrame(kf::KeyedFrame) = frame(kf)
Base.copy(kf::KeyedFrame) = KeyedFrame(copy(DataFrame(kf)), copy(keys(kf)))
Base.deepcopy(kf::KeyedFrame) = KeyedFrame(deepcopy(DataFrame(kf)), deepcopy(keys(kf)))
Base.convert(::Type{DataFrame}, kf::KeyedFrame) = frame(kf)
DataFrames.SubDataFrame(kf::KeyedFrame, args...) = SubDataFrame(frame(kf), args...)
DataFrames.DataFrameRow(kf::KeyedFrame, args...) = DataFrameRow(frame(kf), args...)
if isdefined(DataFrames, :_check_consistency)
DataFrames._check_consistency(kf::KeyedFrame) = DataFrames._check_consistency(frame(kf))
end
##### EQUALITY #####
Base.:(==)(a::KeyedFrame, b::KeyedFrame) = frame(a) == frame(b) && sort(keys(a)) == sort(keys(b))
Base.isequal(a::KeyedFrame, b::KeyedFrame) = isequal(frame(a), frame(b)) && isequal(keys(a), keys(b))
Base.isequal(a::KeyedFrame, b::AbstractDataFrame) = false
Base.isequal(a::AbstractDataFrame, b::KeyedFrame) = false
Base.hash(kf::KeyedFrame, h::UInt) = hash(keys(kf), hash(frame(kf), h))
##### SIZE #####
DataFrames.nrow(kf::KeyedFrame) = nrow(frame(kf))
DataFrames.ncol(kf::KeyedFrame) = ncol(frame(kf))
##### ACCESSORS #####
DataFrames.index(kf::KeyedFrame) = index(frame(kf))
Base.names(kf::KeyedFrame) = names(frame(kf))
##### INDEXING #####
const ColumnIndex = Union{Real, Symbol}
frame(kf::KeyedFrame) = getfield(kf, :frame)
Base.keys(kf::KeyedFrame) = getfield(kf, :key)
Base.setindex!(kf::KeyedFrame, value, ind...) = setindex!(frame(kf), value, ind...)
Base.setproperty!(kf::KeyedFrame, field::Symbol, value) = setproperty!(frame(kf), field, value)
# I don't want to have to write the same function body several times, so...
function _kf_getindex(kf::KeyedFrame, index...)
# If indexing by column, some keys might be removed.
df = frame(kf)[index...]
return KeyedFrame(DataFrame(df), intersect(propertynames(df), keys(kf)))
end
# Returns a KeyedFrame
Base.getindex(kf::KeyedFrame, ::Colon) = copy(kf)
Base.getindex(kf::KeyedFrame, ::Colon, ::Colon) = copy(kf)
# Returns a KeyedFrame
Base.getindex(kf::KeyedFrame, ::typeof(!), col::AbstractVector) = _kf_getindex(kf, !, col)
# Returns a KeyedFrame or a column (depending on the type of col)
Base.getindex(kf::KeyedFrame, ::typeof(!), col) = frame(kf)[!, col]
Base.getindex(kf::KeyedFrame, ::Colon, col) = frame(kf)[:, col]
# Returns a scalar
Base.getindex(kf::KeyedFrame, row::Integer, col::ColumnIndex) = frame(kf)[row, col]
# Returns a KeyedFrame
Base.getindex(kf::KeyedFrame, row::Integer, col::AbstractVector) = _kf_getindex(kf, row, col)
# Returns a column
Base.getindex(kf::KeyedFrame, row::AbstractVector, col::ColumnIndex) = frame(kf)[row, col]
# Returns a KeyedFrame
function Base.getindex(kf::KeyedFrame, row::AbstractVector, col::AbstractVector)
return _kf_getindex(kf, row, col)
end
# Returns a KeyedFrame
function Base.getindex(kf::KeyedFrame, row::AbstractVector, col::Colon)
return _kf_getindex(kf, row, col)
end
# Returns a KeyedFrame
Base.getindex(kf::KeyedFrame, row::Integer, col::Colon) = kf[[row], col]
##### SORTING #####
function Base.sort(kf::KeyedFrame, cols=nothing; kwargs...)
return KeyedFrame(sort(frame(kf), cols === nothing ? keys(kf) : cols; kwargs...), keys(kf))
end
function Base.sort!(kf::KeyedFrame, cols=nothing; kwargs...)
sort!(frame(kf), cols === nothing ? keys(kf) : cols; kwargs...)
return kf
end
function Base.issorted(kf::KeyedFrame, cols=nothing; kwargs...)
return issorted(frame(kf), cols === nothing ? keys(kf) : cols; kwargs...)
end
##### PUSH/APPEND/DELETE #####
function Base.push!(kf::KeyedFrame, data)
push!(frame(kf), data)
return kf
end
function Base.append!(kf::KeyedFrame, data)
append!(frame(kf), data)
return kf
end
function DataFrames.delete!(kf::KeyedFrame, inds)
delete!(frame(kf), inds)
return kf
end
@deprecate deleterows!(kf::KeyedFrame, inds) delete!(kf, inds)
@deprecate deletecols!(kf::KeyedFrame, inds) select!(kf, Not(inds))
function DataFrames.select!(kf::KeyedFrame, inds)
select!(frame(kf), inds)
new_keys = propertynames(kf)
filter!(in(new_keys), keys(kf))
return kf
end
function DataFrames.select(kf::KeyedFrame, inds; copycols::Bool=true)
new_df = select(frame(kf), inds; copycols=copycols)
df_names = propertynames(new_df)
new_keys = filter(in(df_names), keys(kf))
return KeyedFrame(new_df, new_keys)
end
##### RENAME #####
function DataFrames.rename!(kf::KeyedFrame, nms::AbstractVector{Pair{Symbol,Symbol}})
rename!(frame(kf), nms)
for (from, to) in nms
i = findfirst(isequal(from), keys(kf))
if i !== nothing
keys(kf)[i] = to
end
end
return kf
end
DataFrames.rename!(kf::KeyedFrame, nms::Pair{Symbol, Symbol}...) = rename!(kf, collect(nms))
DataFrames.rename!(kf::KeyedFrame, nms::Dict{Symbol, Symbol}) = rename!(kf, collect(pairs(nms)))
DataFrames.rename!(f::Function, kf::KeyedFrame) = rename!(kf, [(nm => f(nm)) for nm in propertynames(kf)])
DataFrames.rename(kf::KeyedFrame, args...) = rename!(copy(kf), args...)
DataFrames.rename(f::Function, kf::KeyedFrame) = rename!(f, copy(kf))
##### UNIQUE #####
_unique(kf::KeyedFrame, cols) = KeyedFrame(unique(frame(kf), cols), keys(kf))
function _unique!(kf::KeyedFrame, cols)
unique!(frame(kf), cols)
return kf
end
Base.unique(kf::KeyedFrame, cols::AbstractVector) = _unique(kf, cols)
Base.unique(kf::KeyedFrame, cols::Union{Integer, Symbol, Colon}) = _unique(kf, cols)
Base.unique(kf::KeyedFrame) = _unique(kf, keys(kf))
DataFrames.unique!(kf::KeyedFrame, cols::Union{Integer, Symbol, Colon}) = _unique!(kf, cols)
DataFrames.unique!(kf::KeyedFrame, cols::AbstractVector) = _unique!(kf, cols)
DataFrames.unique!(kf::KeyedFrame) = _unique!(kf, keys(kf))
DataFrames.nonunique(kf::KeyedFrame) = nonunique(frame(kf), keys(kf))
DataFrames.nonunique(kf::KeyedFrame, cols) = nonunique(frame(kf), cols)
##### JOINING #####
# Implement the various join functions provided by DataFrames.jl. If a `KeyedFrame` is used
# in a join and no `on` keyword is provided then the keys of the `KeyedFrame` will be used
# when joining. Additionally, a `KeyedFrame` will be returned only if the first argument to
# the join function is of type `KeyedFrame`.
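# For example (illustrative):
#   kf = KeyedFrame(DataFrame(; a=1:3, b=4:6), :a)
#   innerjoin(kf, DataFrame(; a=2:3, c=7:8))  # joins on :a, returns a KeyedFrame
#   innerjoin(DataFrame(; a=2:3, c=7:8), kf)  # joins on :a, returns a DataFrame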
for j in (:innerjoin, :leftjoin, :rightjoin, :outerjoin, :semijoin, :antijoin, :crossjoin)
# Note: We could probably support joining more than two DataFrames but it becomes
# tricker with what key to use with multiple KeyedFrames.
@eval begin
# Returns a KeyedFrame
function DataFrames.$j(
kf1::KeyedFrame,
kf2::KeyedFrame;
on=nothing,
kwargs...,
)
if on === nothing
on = intersect(keys(kf1), keys(kf2))
end
result = $j(
frame(kf1),
frame(kf2);
on=on,
kwargs...,
)
key = $(if j in (:semijoin, :antijoin)
:(intersect(keys(kf1), propertynames(result)))
else
# A join can sometimes rename columns, meaning some of the key columns "disappear"
:(intersect(union(keys(kf1), keys(kf2)), propertynames(result)))
end)
return KeyedFrame(result, key)
end
# Returns a KeyedFrame
function DataFrames.$j(
kf::KeyedFrame,
df::AbstractDataFrame;
on=nothing,
kwargs...,
)
if on === nothing
on = intersect(keys(kf), propertynames(df))
end
result = $j(
frame(kf),
df;
on=on,
kwargs...,
)
key = intersect(keys(kf), propertynames(result))
return KeyedFrame(result, key)
end
# Does NOT return a KeyedFrame
function DataFrames.$j(
df::AbstractDataFrame,
kf::KeyedFrame;
on=nothing,
kwargs...,
)
if on === nothing
on = intersect(keys(kf), propertynames(df))
end
result = $j(
df,
frame(kf);
on=on,
kwargs...,
)
return result
end
end
end
# Implement the previously defined `join` functions used with DataFrames < 0.21
for (T, S) in [
(:KeyedFrame, :KeyedFrame),
(:KeyedFrame, :AbstractDataFrame),
(:AbstractDataFrame, :KeyedFrame)
]
@eval begin
function Base.join(df1::$T, df2::$S; on=nothing, kind=:inner, kwargs...)
j = if kind === :inner
innerjoin
elseif kind === :left
leftjoin
elseif kind === :right
rightjoin
elseif kind === :outer
outerjoin
elseif kind === :semi
semijoin
elseif kind === :anti
antijoin
            elseif kind === :cross
crossjoin
else
throw(ArgumentError("Unknown join kind: $kind"))
end
Base.depwarn("$kind joining data frames using `join` is deprecated, use `$(kind)join` instead", :join)
return j(df1, df2; on=on, kwargs...)
end
end
end
##### FIRST/LAST #####
DataFrames.first(kf::KeyedFrame, r::Int) = KeyedFrame(first(frame(kf), r), keys(kf))
DataFrames.last(kf::KeyedFrame, r::Int) = KeyedFrame(last(frame(kf), r), keys(kf))
##### PERMUTE #####
function DataFrames.permutecols!(kf::KeyedFrame, index::AbstractVector)
select!(frame(kf), index)
return kf
end
export KeyedFrame
end
| KeyedFrames | https://github.com/invenia/KeyedFrames.jl.git |
|
[
"MIT"
] | 1.2.1 | b9a0be13edfd6b831cada22f5ac9a05c0ede6018 | code | 17863 | using KeyedFrames
using DataFrames
using Test
@testset "KeyedFrames" begin
df1 = DataFrame(:a => 1:10, :b => 2:11, :c => 3:12)
df2 = DataFrame(:a => 1:5, :d => 4:8)
df3 = DataFrame(:a => [4, 2, 1], :e => [2, 5, 2], :f => 1:3)
@testset "constructor" begin
kf1 = KeyedFrame(df1, [:a, :b])
@test KeyedFrames.frame(kf1) === df1
@test keys(kf1) == [:a, :b]
kf2 = KeyedFrame(df2, :a)
@test KeyedFrames.frame(kf2) === df2
@test keys(kf2) == [:a]
kf3 = KeyedFrame(df3, ["e", "a"])
@test KeyedFrames.frame(kf3) === df3
@test keys(kf3) == [:e, :a]
@test_throws ArgumentError KeyedFrame(df1, [:a, :b, :d])
@test_throws ArgumentError KeyedFrame(df1, :d)
@test keys(KeyedFrame(df1, [:a, :a, :b, :a])) == [:a, :b]
end
kf1 = KeyedFrame(df1, [:a, :b])
kf2 = KeyedFrame(df2, :a)
kf3 = KeyedFrame(df3, ["e", "a"])
@testset "equality" begin
cp = deepcopy(kf1)
cpd = deepcopy(df1)
@test kf1 == kf1
@test isequal(kf1, kf1)
@test hash(kf1) == hash(kf1)
@test kf1 == cp
@test isequal(kf1, cp)
@test hash(kf1) == hash(cp)
@test kf1 == KeyedFrame(cpd, [:a, :b])
@test isequal(kf1, KeyedFrame(cpd, [:a, :b]))
@test hash(kf1) == hash(KeyedFrame(cpd, [:a, :b]))
@test kf1 == KeyedFrame(cpd, [:b, :a])
@test !isequal(kf1, KeyedFrame(cpd, [:b, :a]))
@test hash(kf1) != hash(KeyedFrame(cpd, [:b, :a]))
@test kf1 == df1
@test df1 == kf1
@test !isequal(kf1, df1)
@test !isequal(df1, kf1)
@test hash(kf1) != hash(df1)
@test kf1 != KeyedFrame(cpd, [:a, :b, :c])
@test !isequal(kf1, KeyedFrame(cpd, [:a, :b, :c]))
@test hash(kf1) != hash(KeyedFrame(cpd, [:a, :b, :c]))
@test kf2 != KeyedFrame(df3, :a)
@test !isequal(kf2, KeyedFrame(df3, :a))
@test hash(kf2) != hash(KeyedFrame(df3, :a))
end
@testset "copy" begin
@test kf1 == deepcopy(kf1)
@test kf1 !== deepcopy(kf1)
@test DataFrame(kf1) !== DataFrame(deepcopy(kf1))
@test keys(kf1) !== keys(deepcopy(kf1))
end
@testset "convert" begin
for (kf, df) in [(kf1, df1), (kf2, df2), (kf3, df3)]
@test convert(DataFrame, kf) == df
@test DataFrame(kf) == df
@test SubDataFrame(kf, 1:3, :) == SubDataFrame(df, 1:3, :)
end
end
@testset "size" begin
for (kf, df) in [(kf1, df1), (kf2, df2), (kf3, df3)]
@test nrow(kf) == nrow(df)
@test ncol(kf) == ncol(df)
@test size(kf) == size(df)
end
end
@testset "names/index/key" begin
for (kf, df) in [(kf1, df1), (kf2, df2), (kf3, df3)]
@test KeyedFrames.names(kf) == DataFrames.names(df)
@test KeyedFrames.index(kf) == DataFrames.index(df)
@test keys(kf) == keys(kf)
end
end
@testset "getindex" begin
@test isequal(kf1[:], kf1)
@test isequal(kf1[:, :], kf1)
@test isequal(kf1[1, :], KeyedFrame(DataFrame(; a=1, b=2, c=3), [:a, :b]))
@test isequal(kf1[8:10, :], KeyedFrame(DataFrame(; a=8:10, b=9:11, c=10:12), [:a, :b]))
@test isequal(kf1[!, 1:2], KeyedFrame(DataFrame(; a=1:10, b=2:11), [:a, :b]))
@test isequal(kf1[!, [:a, :b]], KeyedFrame(DataFrame(; a=1:10, b=2:11), [:a, :b]))
@test isequal(kf1[1, [:a, :b]], KeyedFrame(DataFrame(; a=1, b=2), [:a, :b]))
@test isequal(kf1[8:10, 1:2], KeyedFrame(DataFrame(; a=8:10, b=9:11), [:a, :b]))
@test kf1[:, 1:2] == KeyedFrame(DataFrame(; a=1:10, b=2:11), [:a, :b])
@test kf1[:, [:a, :b]] == KeyedFrame(DataFrame(; a=1:10, b=2:11), [:a, :b])
# When :a column disappears it is removed from the key
@test isequal(kf1[!, 2:3], KeyedFrame(DataFrame(; b=2:11, c=3:12), :b))
@test isequal(kf1[!, [:b, :c]], KeyedFrame(DataFrame(; b=2:11, c=3:12), :b))
@test kf1[:, 2:3] == KeyedFrame(DataFrame(; b=2:11, c=3:12), :b)
@test kf1[:, [:b, :c]] == KeyedFrame(DataFrame(; b=2:11, c=3:12), :b)
# Returns a column/value instead of a KeyedFrame
@test isequal(kf1[1, :a], 1)
@test isequal(kf1[8:10, 1], [8, 9, 10])
@test isequal(kf1[:, :b], collect(2:11))
@test isequal(kf1[:, 2], collect(2:11))
@test kf1[!, "a"] == 1:10
@test isequal(kf1[!, ["a", "b"]], KeyedFrame(DataFrame(; a=1:10, b=2:11), [:a, :b]))
end
@testset "setindex!" begin
cp = deepcopy(kf1)
cp[!, :b] = collect(11:20) # Need to collect on a setindex! with DataFrames.
@test isequal(cp, KeyedFrame(DataFrame(; a=1:10, b=11:20, c=3:12), [:a, :b]))
cp = deepcopy(kf1)
cp[!, :b] .= 3
@test isequal(cp, KeyedFrame(DataFrame(; a=1:10, b=3, c=3:12), [:a, :b]))
cp = deepcopy(kf1)
cp[!, 2] .= 3
@test isequal(cp, KeyedFrame(DataFrame(; a=1:10, b=3, c=3:12), [:a, :b]))
cp = deepcopy(kf1)
cp[!, [:b, :c]] .= 3
@test isequal(cp, KeyedFrame(DataFrame(; a=1:10, b=3, c=3), [:a, :b]))
cp = deepcopy(kf1)
cp[!, 2:3] .= 3
@test isequal(cp, KeyedFrame(DataFrame(; a=1:10, b=3, c=3), [:a, :b]))
cp = deepcopy(kf2)
cp[1, :d] = 10
@test isequal(cp, KeyedFrame(DataFrame(; a=1:5, d=[10, 5, 6, 7, 8]), :a))
cp = deepcopy(kf2)
cp[1, 1:2] .= 10
@test isequal(cp, KeyedFrame(DataFrame(;a=[10, 2, 3, 4, 5],d=[10, 5, 6, 7, 8]), :a))
end
@testset "setproperty!" begin
cp = deepcopy(kf1)
cp.b = 10:-1:1
@test isequal(cp, KeyedFrame(DataFrame(; a=1:10, b=10:-1:1, c=3:12), [:a, :b]))
end
@testset "first/last" begin
# Don't assume that n will always equal 6
@test first(kf1) isa KeyedFrame
@test isequal(first(kf1, 1), kf1[1, :])
@test isequal(first(kf1, 3), kf1[1:3, :])
@test isequal(first(kf1, 6), kf1[1:6, :])
@test last(kf1) isa KeyedFrame
@test isequal(last(kf1, 1), kf1[end, :])
@test isequal(last(kf1, 3), kf1[end - 2:end, :])
@test isequal(last(kf1, 6), kf1[end - 5:end, :])
end
@testset "sort" begin
# No columns specified (sort by key)
expected = KeyedFrame(DataFrame(; a=[1, 4, 2], e=[2, 2, 5], f=[3, 1, 2]), [:e, :a])
reversed = KeyedFrame(DataFrame(; a=[2, 4, 1], e=[5, 2, 2], f=[2, 1, 3]), [:e, :a])
@test sort(kf3) == expected
@test sort(kf3; rev=true) == reversed
cp = deepcopy(kf3)
@test sort!(cp) == expected
@test cp == expected
cp = deepcopy(kf3)
@test sort!(cp; rev=true) == reversed
@test cp == reversed
@test issorted(kf1)
@test !issorted(kf3)
@test issorted(expected)
@test !issorted(reversed)
@test issorted(reversed; rev=true)
# Columns specified
expected = KeyedFrame(DataFrame(; a=[1, 2, 4], e=[2, 5, 2], f=[3, 2, 1]), [:e, :a])
reversed = KeyedFrame(DataFrame(; a=[4, 2, 1], e=[2, 5, 2], f=[1, 2, 3]), [:e, :a])
@test sort(kf3, :a) == expected
@test sort(kf3, :a; rev=true) == reversed
cp = deepcopy(kf3)
@test sort!(cp, :a) == expected
@test cp == expected
cp = deepcopy(kf3)
@test sort!(cp, :a; rev=true) == reversed
@test cp == reversed
@test !issorted(expected)
@test issorted(expected, :a)
@test !issorted(reversed)
@test !issorted(reversed, :a)
@test issorted(reversed, :a; rev=true)
# Test return type of `sort!`
@test isa(sort!(deepcopy(kf3), :a), KeyedFrame)
end
@testset "push!" begin
cp = deepcopy(kf2)
push!(cp, [6, 9])
@test cp == KeyedFrame(DataFrame(; a=1:6, d=4:9), :a)
# Test return type of `push!`
@test isa(push!(deepcopy(kf2), [6, 9]), KeyedFrame)
end
@testset "append!" begin
cp = deepcopy(kf2)
append!(cp, DataFrame(; a=6:8, d=9:11))
@test cp == KeyedFrame(DataFrame(; a=1:8, d=4:11), :a)
# With append! we discard the key from the second KeyedFrame, which I think is fine.
cp = deepcopy(kf2)
append!(cp, KeyedFrame(DataFrame(; a=6:8, d=9:11), [:a, :d]))
@test cp == KeyedFrame(DataFrame(; a=1:8, d=4:11), :a)
# Test return type of `append!`
@test isa(
append!(deepcopy(kf2), KeyedFrame(DataFrame(; a=6:8, d=9:11), [:a, :d])),
KeyedFrame
)
end
@testset "delete!" begin
cp = deepcopy(kf1)
delete!(cp, 1)
@test cp == KeyedFrame(DataFrame(; a=2:10, b=3:11, c=4:12), [:a, :b])
cp = deepcopy(kf1)
delete!(cp, 1:4)
@test cp == KeyedFrame(DataFrame(; a=5:10, b=6:11, c=7:12), [:a, :b])
cp = deepcopy(kf1)
delete!(cp, [1, 10])
@test cp == KeyedFrame(DataFrame(; a=2:9, b=3:10, c=4:11), [:a, :b])
# Test return type of `delete!`
@test delete!(deepcopy(kf1), 1) isa KeyedFrame
end
@testset "select!" begin
for ind in (:b, 2, [:b], [2])
cp = deepcopy(kf1)
select!(cp, ind)
@test cp == KeyedFrame(DataFrame(; b=2:11), [:b])
end
for ind in (:b, 2, [:b], [2])
cp = deepcopy(kf1)
select!(cp, Not(ind))
@test cp == KeyedFrame(DataFrame(; a=1:10, c=3:12), [:a])
end
for ind in ([:a, :c], [1, 3])
cp = deepcopy(kf1)
select!(cp, ind)
@test cp == KeyedFrame(DataFrame(; a=1:10, c=3:12), [:a])
end
for ind in ([:a, :c], [1, 3])
cp = deepcopy(kf1)
select!(cp, Not(ind))
@test cp == KeyedFrame(DataFrame(; b=2:11), [:b])
end
for ind in ([:a, :b], [1, 2])
cp = deepcopy(kf1)
select!(cp, ind)
@test cp == KeyedFrame(DataFrame(; a=1:10, b=2:11), [:a, :b])
end
for ind in ([:a, :b], [1, 2])
cp = deepcopy(kf1)
select!(cp, Not(ind))
@test cp == KeyedFrame(DataFrame(; c=3:12), Symbol[])
end
for ind in (:d, 4, [:a, :d], [1, 4])
cp = deepcopy(kf1)
@test_throws Exception select!(cp, Not(ind))
end
# Test return type of `select!`
@test isa(select!(deepcopy(kf1), :b), KeyedFrame)
end
@testset "select" begin
for ind in (:b, 2, [:b], [2])
cp = deepcopy(kf1)
kf = select(cp, ind)
@test cp == kf1
@test kf == KeyedFrame(DataFrame(; b=2:11), [:b])
end
for ind in (:b, 2, [:b], [2])
cp = deepcopy(kf1)
kf = select(cp, Not(ind))
@test kf == KeyedFrame(DataFrame(; a=1:10, c=3:12), [:a])
end
for ind in ([:a, :c], [1, 3])
cp = deepcopy(kf1)
kf = select(cp, ind)
@test kf == KeyedFrame(DataFrame(; a=1:10, c=3:12), [:a])
end
for ind in ([:a, :c], [1, 3])
cp = deepcopy(kf1)
kf = select(cp, Not(ind))
@test kf == KeyedFrame(DataFrame(; b=2:11), [:b])
end
for ind in ([:a, :b], [1, 2])
cp = deepcopy(kf1)
kf = select(cp, ind)
@test kf == KeyedFrame(DataFrame(; a=1:10, b=2:11), [:a, :b])
end
for ind in ([:a, :b], [1, 2])
cp = deepcopy(kf1)
kf = select(cp, Not(ind))
@test kf == KeyedFrame(DataFrame(; c=3:12), Symbol[])
end
for ind in (:d, 4, [:a, :d], [1, 4])
cp = deepcopy(kf1)
@test_throws Exception select(cp, Not(ind))
end
# Test return type of `select!`
@test isa(select(deepcopy(kf1), :b), KeyedFrame)
end
@testset "rename" begin
initial = copy(kf1)
expected = KeyedFrame(DataFrame(; new_a=1:10, b=2:11, new_c=3:12), [:new_a, :b])
@test rename(initial, :a => :new_a, :c => :new_c) == expected
@test initial == kf1
@test rename(initial, [:a => :new_a, :c => :new_c]) == expected
@test initial == kf1
@test rename(initial, Dict(:a => :new_a, :c => :new_c)) == expected
@test initial == kf1
@test rename(x -> x == :b ? x : Symbol("new_$x"), initial) == expected
@test initial == kf1
@test rename!(initial, :a => :new_a, :c => :new_c) == expected
@test initial == expected
initial = copy(kf1)
@test rename!(initial, [:a => :new_a, :c => :new_c]) == expected
@test initial == expected
initial = copy(kf1)
@test rename!(initial, Dict(:a => :new_a, :c => :new_c)) == expected
@test initial == expected
initial = copy(kf1)
@test rename!(x -> x == :b ? x : Symbol("new_$x"), initial) == expected
@test initial == expected
end
@testset "unique" begin
kf4 = KeyedFrame(DataFrame(; a=[1, 2, 3, 1, 2], b=[1, 2, 3, 4, 2], c=1:5), [:a, :b])
@test nonunique(kf4) == [false, false, false, false, true]
@test nonunique(kf4, :a) == [false, false, false, true, true]
# Use default columns (key)
expected = KeyedFrame(DataFrame(; a=[1, 2, 3, 1], b=1:4, c=1:4), [:a, :b])
@test isequal(unique(kf4), expected)
cp = deepcopy(kf4)
unique!(cp)
@test isequal(cp, expected)
# Specify columns
expected = KeyedFrame(DataFrame(; a=1:3, b=1:3, c=1:3), [:a, :b])
@test isequal(unique(kf4, :a), expected)
cp = deepcopy(kf4)
unique!(cp, :a)
@test isequal(cp, expected)
# Test return type of `unique!`
@test isa(unique!(deepcopy(kf1)), KeyedFrame)
@test isa(unique!(deepcopy(kf1), :a), KeyedFrame)
end
@testset "join" begin
expected = KeyedFrame(DataFrame(; a=1:5, b=2:6, c=3:7, d=4:8), [:a, :b])
@test isequal(innerjoin(kf1, kf2), expected)
@test isequal(innerjoin(kf1, df2), expected) # Join a KeyedFrame and a DF
@test isequal(innerjoin(df1, kf2), DataFrame(expected)) # Join a DF and a KeyedFrame
@test isequal(rightjoin(kf1, kf2), expected)
@test isequal(rightjoin(kf1, df2), expected)
@test isequal(rightjoin(df1, kf2), DataFrame(expected))
expected = KeyedFrame(DataFrame(; a=1:5, d=4:8, b=2:6, c=3:7), [:a, :b])
@test isequal(leftjoin(kf2, kf1), expected)
expected = KeyedFrame(DataFrame(; a=1:5, d=4:8, b=2:6, c=3:7), [:a])
@test isequal(leftjoin(kf2, df1), expected)
@test isequal(leftjoin(df2, kf1), DataFrame(expected))
expected = KeyedFrame(
DataFrame(; a=1:10, b=2:11, c=3:12, d=[4:8; fill(missing, 5)]), [:a, :b]
)
@test isequal(outerjoin(kf1, kf2), expected)
@test isequal(outerjoin(kf1, df2), expected)
@test isequal(outerjoin(df1, kf2), DataFrame(expected))
@test isequal(leftjoin(kf1, kf2), expected)
@test isequal(leftjoin(kf1, df2), expected)
@test isequal(leftjoin(df1, kf2), DataFrame(expected))
expected = KeyedFrame(
DataFrame(; a=1:10, d=[4:8; fill(missing, 5)], b=2:11, c=3:12), [:a, :b]
)
@test isequal(rightjoin(kf2, kf1), expected)
expected = KeyedFrame(
DataFrame(; a=1:10, d=[4:8; fill(missing, 5)], b=2:11, c=3:12), :a
)
@test isequal(rightjoin(kf2, df1), expected)
expected = DataFrame(; a=1:10, d=[4:8; fill(missing, 5)], b=2:11, c=3:12)
@test isequal(rightjoin(df2, kf1), expected)
expected = KeyedFrame(DataFrame(; a=1:5, b=2:6, c=3:7), [:a, :b])
@test isequal(semijoin(kf1, kf2), expected)
@test isequal(semijoin(kf1, df2), expected)
@test isequal(semijoin(df1, kf2), DataFrame(expected))
expected = KeyedFrame(DataFrame(; a=1:5, d=4:8), :a)
@test isequal(semijoin(kf2, kf1), expected)
@test isequal(semijoin(kf2, df1), expected)
@test isequal(semijoin(df2, kf1), DataFrame(expected))
expected = KeyedFrame(DataFrame(; a=6:10, b=7:11, c=8:12), [:a, :b])
@test isequal(antijoin(kf1, kf2), expected)
@test isequal(antijoin(kf1, df2), expected)
@test isequal(antijoin(df1, kf2), DataFrame(expected))
expected = KeyedFrame(DataFrame(; a=[], d=[]), :a)
@test isequal(antijoin(kf2, kf1), expected)
@test isequal(antijoin(kf2, df1), expected)
@test isequal(antijoin(df2, kf1), DataFrame(expected))
expected = KeyedFrame(
DataFrame(; a=[1, 2, 4], d=[4, 5, 7], e=[2, 5, 2], f=[3, 2, 1]), [:a, :e]
)
@test isequal(innerjoin(kf2, kf3), expected)
expected = KeyedFrame(
DataFrame(;
a=[1, 2, 3, 4, 5, 4, 1],
d=[4, 5, 6, 7, 8, 2, 2],
f=[missing, 2, missing, missing, missing, 1, 3],
),
:a, # Key :e disappears, because it's renamed :d by the join
)
@test isequal(outerjoin(kf2, kf3; on=[:a => :a, :d => :e]), expected)
end
@testset "permutecols!" begin
cp = deepcopy(kf1)
permutecols!(cp, [1, 3, 2])
@test isequal(cp, KeyedFrame(DataFrame(; a=1:10, c=3:12, b=2:11), [:a, :b]))
permutecols!(cp, [2, 3, 1])
@test isequal(cp, KeyedFrame(DataFrame(; c=3:12, b=2:11, a=1:10), [:a, :b]))
@test_throws Exception permutecols!(cp, [1, 2, 3, 4])
# Test return type of `permutecols!`
@test isa(permutecols!(deepcopy(kf1), [1, 2, 3]), KeyedFrame)
end
@testset "deprecated" begin
cp = deepcopy(kf1)
@test_deprecated deletecols!(cp, :b)
end
end
| KeyedFrames | https://github.com/invenia/KeyedFrames.jl.git |
|
[
"MIT"
] | 1.2.1 | b9a0be13edfd6b831cada22f5ac9a05c0ede6018 | docs | 704 | # KeyedFrames
[](https://invenia.github.io/KeyedFrames.jl/stable)
[](https://invenia.github.io/KeyedFrames.jl/latest)
[](https://travis-ci.com/invenia/KeyedFrames.jl)
[](https://codecov.io/gh/invenia/KeyedFrames.jl)
A `KeyedFrame` is a `DataFrame` that also stores a vector of column names that together act
as a unique key, which can be used to determine which columns to `join`, `unique`, and
`sort` on by default.
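A minimal example (see the documentation for details):
```julia
using KeyedFrames, DataFrames
kf = KeyedFrame(DataFrame(; a=1:3, b=4:6, c=7:9), [:a, :b])
sort(kf)    # sorts on the key columns :a and :b by default
unique(kf)  # deduplicates on the key columns by default
```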
| KeyedFrames | https://github.com/invenia/KeyedFrames.jl.git |
|
[
"MIT"
] | 1.2.1 | b9a0be13edfd6b831cada22f5ac9a05c0ede6018 | docs | 4956 | # KeyedFrames
A `KeyedFrame` is a `DataFrame` that also stores a vector of column names that together act
as a unique key.
This key is used to provide default column information to `join`, `unique`, and `sort` when
this information is not provided by the user.
## Constructor
```julia
KeyedFrame(df::DataFrame, key::Vector)
```
Create a `KeyedFrame` using the provided `DataFrame`; `key` specifies the columns
to use by default when performing a `join` on `KeyedFrame`s when `on` is not provided.
### Example
```julia
julia> kf1 = KeyedFrame(DataFrame(; a=1:10, b=2:11, c=3:12), [:a, :b])
10×3 KeyedFrames.KeyedFrame
│ Row │ a │ b │ c │
├─────┼────┼────┼────┤
│ 1 │ 1 │ 2 │ 3 │
│ 2 │ 2 │ 3 │ 4 │
│ 3 │ 3 │ 4 │ 5 │
│ 4 │ 4 │ 5 │ 6 │
│ 5 │ 5 │ 6 │ 7 │
│ 6 │ 6 │ 7 │ 8 │
│ 7 │ 7 │ 8 │ 9 │
│ 8 │ 8 │ 9 │ 10 │
│ 9 │ 9 │ 10 │ 11 │
│ 10 │ 10 │ 11 │ 12 │
julia> kf2 = KeyedFrame(DataFrame(; a=[4, 2, 1], d=[2, 5, 2], e=1:3), [:d, :a])
3×3 KeyedFrames.KeyedFrame
│ Row │ a │ d │ e │
├─────┼───┼───┼───┤
│ 1 │ 4 │ 2 │ 1 │
│ 2 │ 2 │ 5 │ 2 │
│ 3 │ 1 │ 2 │ 3 │
```
## Joining
When performing a `join`, if only one of the arguments is an `KeyedFrame` and `on` is not
specified, the frames will be joined on the `key` of the `KeyedFrame`. If both
arguments are `KeyedFrame`s, `on` will default to the intersection of their respective
indices. In all cases, the result of the `join` will share a type with the first argument.
### Example
```julia
julia> join(kf1, kf2)
3×5 KeyedFrames.KeyedFrame
│ Row │ a │ b │ c │ d │ e │
├─────┼───┼───┼───┼───┼───┤
│ 1 │ 1 │ 2 │ 3 │ 2 │ 3 │
│ 2 │ 2 │ 3 │ 4 │ 5 │ 2 │
│ 3 │ 4 │ 5 │ 6 │ 2 │ 1 │
julia> keys(ans)
3-element Array{Symbol,1}:
:a
:b
:d
```
Although the keys of both `KeyedFrame`s are used in constructing the default value for `on`,
the user may still supply the `on` keyword if they wish:
```julia
julia> join(kf1, kf2; on=[:a => :a, :b => :d], kind=:outer)
12×4 KeyedFrames.KeyedFrame
│ Row │ a │ b │ c │ e │
├─────┼────┼────┼─────────┼─────────┤
│ 1 │ 1 │ 2 │ 3 │ 3 │
│ 2 │ 2 │ 3 │ 4 │ missing │
│ 3 │ 3 │ 4 │ 5 │ missing │
│ 4 │ 4 │ 5 │ 6 │ missing │
│ 5 │ 5 │ 6 │ 7 │ missing │
│ 6 │ 6 │ 7 │ 8 │ missing │
│ 7 │ 7 │ 8 │ 9 │ missing │
│ 8 │ 8 │ 9 │ 10 │ missing │
│ 9 │ 9 │ 10 │ 11 │ missing │
│ 10 │ 10 │ 11 │ 12 │ missing │
│ 11 │ 4 │ 2 │ missing │ 1 │
│ 12 │ 2 │ 5 │ missing │ 2 │
julia> keys(ans)
2-element Array{Symbol,1}:
:a
:b
```
Notice that `:d` is no longer a key (as it has been renamed `:b`). It's important to note
that while the user may expect `:b` to be part of the new frame's key (as `:d` was), `join`
does not infer this.
## Deduplication
When calling `unique` (or `unique!`) on a KeyedFrame without providing a `cols` argument,
`cols` will default to the `key` of the `KeyedFrame` instead of all columns. If you wish to
remove only rows that are duplicates across all columns (rather than just across the key),
you can call `unique!(kf, names(kf))`.
### Example
```julia
julia> kf3 = KeyedFrame(DataFrame(; a=[1, 2, 3, 2, 1], b=[1, 2, 3, 2, 5], c=1:5), [:a, :b])
5×3 KeyedFrames.KeyedFrame
│ Row │ a │ b │ c │
├─────┼───┼───┼───┤
│ 1 │ 1 │ 1 │ 1 │
│ 2 │ 2 │ 2 │ 2 │
│ 3 │ 3 │ 3 │ 3 │
│ 4 │ 2 │ 2 │ 4 │
│ 5 │ 1 │ 5 │ 5 │
julia> unique(kf3)
4×3 KeyedFrames.KeyedFrame
│ Row │ a │ b │ c │
├─────┼───┼───┼───┤
│ 1 │ 1 │ 1 │ 1 │
│ 2 │ 2 │ 2 │ 2 │
│ 3 │ 3 │ 3 │ 3 │
│ 4 │ 1 │ 5 │ 5 │
julia> unique(kf3, :a)
3×3 KeyedFrames.KeyedFrame
│ Row │ a │ b │ c │
├─────┼───┼───┼───┤
│ 1 │ 1 │ 1 │ 1 │
│ 2 │ 2 │ 2 │ 2 │
│ 3 │ 3 │ 3 │ 3 │
julia> unique(kf3, names(kf3))
5×3 KeyedFrames.KeyedFrame
│ Row │ a │ b │ c │
├─────┼───┼───┼───┤
│ 1 │ 1 │ 1 │ 1 │
│ 2 │ 2 │ 2 │ 2 │
│ 3 │ 3 │ 3 │ 3 │
│ 4 │ 2 │ 2 │ 4 │
│ 5 │ 1 │ 5 │ 5 │
```
## Sorting
When `sort`ing, if no `cols` keyword is supplied, the `key` is used to determine precedence.
```julia
julia> kf2
3×3 KeyedFrames.KeyedFrame
│ Row │ a │ d │ e │
├─────┼───┼───┼───┤
│ 1 │ 4 │ 2 │ 1 │
│ 2 │ 2 │ 5 │ 2 │
│ 3 │ 1 │ 2 │ 3 │
julia> keys(kf2)
2-element Array{Symbol,1}:
:d
:a
julia> sort(kf2)
3×3 KeyedFrames.KeyedFrame
│ Row │ a │ d │ e │
├─────┼───┼───┼───┤
│ 1 │ 1 │ 2 │ 3 │
│ 2 │ 4 │ 2 │ 1 │
│ 3 │ 2 │ 5 │ 2 │
```
## Equality
Two `KeyedFrame`s are considered equal to (`==`) each other if their data are equal and they
have the same `key`. (The order in which columns appear in the `key` is ignored for the
purposes of `==`, but is relevant when calling `isequal`. This means that it is possible to
have two `KeyedFrame`s that are considered equal but whose default sort order will be
different by virtue of having `key`s with different column ordering.)
A `KeyedFrame` and a `DataFrame` with identical data are also considered equal (`==` returns
`true`, though `isequal` will be false).
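### Example

An illustrative sketch of these rules (the frames below are hypothetical, chosen to
hold identical data; the outputs follow from the semantics described above):

```julia
julia> kfa = KeyedFrame(DataFrame(; a=1:3, b=4:6), [:a, :b]);

julia> kfb = KeyedFrame(DataFrame(; a=1:3, b=4:6), [:b, :a]);

julia> kfa == kfb   # key column order is ignored by ==
true

julia> isequal(kfa, kfb)   # but not by isequal
false

julia> kfa == DataFrame(; a=1:3, b=4:6)   # identical data
true

julia> isequal(kfa, DataFrame(; a=1:3, b=4:6))
false
```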
| KeyedFrames | https://github.com/invenia/KeyedFrames.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 824 | using EHTImages
using Documenter
DocMeta.setdocmeta!(EHTImages, :DocTestSetup, :(using EHTImages); recursive=true)
makedocs(;
modules=[EHTImages],
authors="Kazunori Akiyama",
repo="https://github.com/EHTJulia/EHTImages.jl/blob/{commit}{path}#{line}",
sitename="EHTImages.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://EHTJulia.github.io/EHTImages.jl",
edit_link="main",
assets=String[]
),
pages=[
"Home" => "index.md",
"Intensity Images" => [
"intensityimages/abstractintensityimage.md",
"intensityimages/intensityimage.md"
],
"All Docstrings" => "autodocstrings.md",
]
)
deploydocs(;
repo="github.com/EHTJulia/EHTImages.jl",
devbranch="main"
)
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 2119 | module EHTImages
# Import external packages
using Base
using ComradeBase
import ComradeBase:
imagepixels, intensitymap
using Dates
using DocStringExtensions # for docstrings
using EHTDimensionalData
using EHTNCDBase
using EHTUVData
using EHTUtils:
c, kB, σ2fwhm,
unitconv, get_unit,
Jy, K, rad, deg,
@throwerror,
mjd2datetime, datetime2mjd, jd2mjd, mjd2jd
using FFTW: fftfreq, fftshift, plan_fft, plan_ifft # for FFT
using FITSIO
using FLoops
using Formatting # for python-ish string formatter
using Logging
using Missings: disallowmissing # to load netcdf data
using NCDatasets # to hande netcdf files
using OrderedCollections # to use OrderedDictionary
using Parameters # for more flexible definitions of struct
using PythonCall: pyimport # to use python
import PythonPlot # to use matplotlib
import PythonPlot: imshow
using Unitful, UnitfulAngles, UnitfulAstro # for Units
using VLBISkyModels
# Include
# DataStorageTypes
include("datastoragetypes/datastoragetype.jl")
# Abstract Image Data Set
include("imagedatasets/abstract.jl")
# Intensity images
# Abstract Type
include("intensityimages/abstract/abstract.jl")
include("intensityimages/abstract/metadata.jl")
#include("intensityimages/abstract/convolution.jl")
#include("intensityimages/abstract/modelmap.jl")
include("intensityimages/abstract/plotting_tools.jl")
include("intensityimages/abstract/pythonplot.jl")
include("intensityimages/abstract/io/fitswriter.jl")
include("intensityimages/abstract/io/vlbiskymodels.jl")
# DiskIntensityImage
include("intensityimages/diskintensityimage/diskintensityimage.jl")
include("intensityimages/diskintensityimage/io/const.jl")
include("intensityimages/diskintensityimage/io/reader.jl")
include("intensityimages/diskintensityimage/io/writer.jl")
#include("intensityimages/diskintensityimage/convolution.jl")
# IntensityImage
include("intensityimages/intensityimage/intensityimage.jl")
include("intensityimages/intensityimage/io/reader.jl")
include("intensityimages/intensityimage/io/fitsreader.jl")
include("intensityimages/intensityimage/io/vlbiskymodels.jl")
end
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 369 | """
$(TYPEDEF)
Internal type for specifying the nature of the location of data.
"""
abstract type DataStorageType end
"""
$(TYPEDEF)
Defines a trait that states that data is disk-based.
"""
struct IsDiskData <: DataStorageType end
"""
$(TYPEDEF)
Defines a trait that states that data is memory-based.
"""
struct NotDiskData <: DataStorageType end | EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
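# Usage sketch (illustration only): these singleton traits enable
# Holy-trait-style dispatch; `process` and its methods below are
# hypothetical names, not part of this package.
#
#     process(img) = process(isdiskdata(img), img)
#     process(::IsDiskData, img) = nothing  # e.g. stream data from disk
#     process(::NotDiskData, img) = nothing # e.g. operate on the in-memory array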
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 3220 | export AbstractImageDataSet
export isdiskdata
"""
$(TYPEDEF)
The abstract type for image data sets handled in this module.
AbstractImageDataSet behaves like an abstract array. To achieve this,
each concrete image type needs to implement the following four methods
(see the Julia documentation on "Interfaces").
# Mandatory Methods that need to be defined
- `default_metadata`:
Return the default metadata for the image data set.
- `isdiskdata`:
Determines whether the data is disk-based or memory-based.
Return `IsDiskData()` if data is disk-based,
while return `NotDiskData()` if data is memory-based.
- `isopen`:
Check if data is accessible, return true for accessible data
and false if data is not accessible. This is relevant if
image is based on disk data.
- `iswritable`:
Check if data is writable, return `true` for writable data
and `false` if data is not writable. This is relevant if
image is based on disk data.
# Methods provided
- `size`: returning a tuple containing the dimension of `AbstractImageDataSet.data`
- `getindex`: scalar or vector indexing
- `setindex!`: scalar or vector indexing assignment
- `firstindex`: returning the first index, used in `X[begin]`
- `lastindex`: returning the last index, used in `X[end]`
- `IndexStyle`: returning the index style
"""
abstract type AbstractImageDataSet end
# You usually do not need to overwrite the following five methods.
@inline Base.size(image::AbstractImageDataSet, args...) = Base.size(image.data, args...)
@inline Base.setindex!(image::AbstractImageDataSet, value, key...) = Base.setindex!(image.data, value, key...)
@inline Base.firstindex(image::AbstractImageDataSet, args...) = Base.firstindex(image.data, args...)
@inline Base.lastindex(image::AbstractImageDataSet, args...) = Base.lastindex(image.data, args...)
@inline Base.IndexStyle(::AbstractImageDataSet) = Base.IndexCartesian()
# getindex may need to be overwritten to return a sliced instance of the AbstractImageDataSet subtype
# rather than the sliced array of AbstractImageDataSet.data
@inline Base.getindex(image::AbstractImageDataSet, args...) = Base.getindex(image.data, args...)
"""
$(TYPEDSIGNATURES)
Determines whether the data is disk-based or memory-based.
Return `IsDiskData()` if data is disk-based,
while return `NotDiskData()` if data is memory-based.
"""
@inline isdiskdata(::AbstractImageDataSet) = IsDiskData()
"""
$(TYPEDSIGNATURES)
Check if data is accessible, return `true` for accessible data
and `false` if data is not accessible. This is relevant if
image is based on disk data.
"""
@inline Base.isopen(::AbstractImageDataSet) = false
"""
$(TYPEDSIGNATURES)
Check if data is writable, return `true` for writable data
and `false` if data is not writable. This is relevant if
image is based on disk data.
"""
@inline Base.iswritable(::AbstractImageDataSet) = false
"""
$(FUNCTIONNAME)(::Type{<:AbstractImageDataSet}) -> OrderedDict{Symbol, Any}
$(FUNCTIONNAME)(::AbstractImageDataSet) -> OrderedDict{Symbol, Any}
Return default metadata for the image data set.
"""
function default_metadata(::Type{<:AbstractImageDataSet}) end
default_metadata(image::AbstractImageDataSet) = default_metadata(typeof(image)) | EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
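# --- Illustrative sketch (not part of the package API) ---
# A hypothetical minimal memory-based subtype satisfying the interface above.
# `ExampleImageDataSet` and its field layout are assumptions for illustration.
struct ExampleImageDataSet <: AbstractImageDataSet
data::Array{Float64,5}
metadata::OrderedDict{Symbol,Any}
end
# the four mandatory methods
default_metadata(::Type{ExampleImageDataSet}) = OrderedDict{Symbol,Any}()
isdiskdata(::ExampleImageDataSet) = NotDiskData()
Base.isopen(::ExampleImageDataSet) = true
Base.iswritable(::ExampleImageDataSet) = true
# `size`, `getindex`, `setindex!`, etc. fall back to the generic methods above.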
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 2327 | export AbstractIntensityImage
export get_xygrid
export get_uvgrid
export get_fov
"""
$(TYPEDEF)
This defines a basic interface for intensity images. It is a subtype of `AbstractImageDataSet`.
# Mandatory fields
- `data`: 5 dimensional array for intensity [x, y, polarization, frequency, time]
- `p`: 1 dimensional array for polarization codes in string (coordinate for polarization axis)
- `f`: 1 dimensional array for frequency in Hz (coordinate for frequency axis)
- `t`: 1 dimensional array for time in modified Julian dates (coordinate for time axis)
- `metadata`: Dict-like object to stock metadata
# Mandatory methods need to be defined.
See also documentations for `AbstractImageDataSet`.
"""
abstract type AbstractIntensityImage <: AbstractImageDataSet end
"""
get_xygrid(::AbstractIntensityImage, angunit) --> Tuple{StepRangeLen, StepRangeLen}
get_xygrid(metadata, angunit) --> Tuple{StepRangeLen, StepRangeLen}
Return 1-dimensional StepRangeLen objects for the grids along the x and y axes
in the angular unit specified by angunit. The input can be an intensity image data set or
its metadata.
# Arguments
- `angunit::Union{Unitful.Quantity,Unitful.Units,String}=rad`: Angular units of the output pixel grids.
"""
function get_xygrid(
image::AbstractIntensityImage,
angunit::Union{Unitful.Quantity,Unitful.Units,String}=rad)
return get_xygrid(image.metadata, angunit)
end
"""
get_uvgrid(image::AbstractIntensityImage, dofftshift=true)
get_uvgrid(metadata, dofftshift::Bool=true)
Return the u and v grids corresponding to the image field of view and pixel size.
"""
function get_uvgrid(image::AbstractIntensityImage, dofftshift::Bool=true)
return get_uvgrid(image.metadata, dofftshift)
end
"""
get_fov(::AbstractIntensityImage, angunit) --> Tuple
get_fov(metadata, angunit) --> Tuple
Return the field of view along the x and y axes
in the angular unit specified by angunit. The input can be an intensity image data set or
its metadata.
# Arguments
- `angunit::Union{Unitful.Quantity,Unitful.Units,String}=rad`: Angular units of the output pixel grids.
"""
function get_fov(
image::AbstractIntensityImage,
angunit::Union{Unitful.Quantity,Unitful.Units,String}=rad)
return get_fov(image.metadata, angunit)
end
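# Usage sketch (illustration only; `im` stands for any AbstractIntensityImage):
#
#     xg, yg = get_xygrid(im, "μas")   # pixel-center coordinates in μas
#     fovx, fovy = get_fov(im, "μas")  # field of view in μas
#     ug, vg = get_uvgrid(im)          # fftshifted spatial-frequency grids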
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 2803 | export convolve, convolve!
"""
$(TYPEDSIGNATURES) -> AbstractIntensityImage
Convolve the input image with a given model, and return
the convolved image.
# Arguments
- `image::AbstractIntensityImage`:
The input image. It must not be disk-based.
- `model::EHTModels.AbstractModel`:
The model to be used as the convolution kernel.
- `ex=SequentialEx()`
An executor of FLoops.jl.
"""
function convolve(
image::AbstractIntensityImage,
model::EHTModels.AbstractModel;
ex=SequentialEx()
)::AbstractIntensityImage
# check if the input is disk-based or not.
if isdiskdata(image) == IsDiskData()
@throwerror ArgumentError "Please use `convolve(image, filename, modelname; keywords)` instead."
end
# copy image
newimage = copy(image)
# run convolution
convolve_base!(newimage, model, ex=ex)
return newimage
end
"""
$(TYPEDSIGNATURES)
Convolve the input image with a given model.
# Arguments
- `image::AbstractIntensityImage`:
The input image. It must not be disk-based.
- `model::EHTModels.AbstractModel`:
The model to be used as the convolution kernel.
- `ex=SequentialEx()`
An executor of FLoops.jl.
"""
function convolve!(
image::AbstractIntensityImage,
model::EHTModels.AbstractModel;
ex=SequentialEx()
)
# check if the input image is writable or not.
if iswritable(image) == false
@throwerror ArgumentError "Input image is not writable."
end
# execute convolution
convolve_base!(image, model, ex=ex)
return nothing
end
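# Usage sketch (illustration only): `model` is any EHTModels.AbstractModel
# used as the convolution kernel; the exact constructor depends on EHTModels.
#
#     blurred = convolve(im, model)         # returns a new, convolved image
#     convolve!(im, model)                  # in place, writable images only
#     convolve(im, model; ex=ThreadedEx())  # multi-threaded via FLoops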
"""
$(TYPEDSIGNATURES)
Base function for convolving the input image with a given model.
# Arguments
- `image::AbstractIntensityImage`:
The input image. It must not be disk-based.
- `model::EHTModels.AbstractModel`:
The model to be used as the convolution kernel.
- `ex=SequentialEx()`
An executor of FLoops.jl.
"""
function convolve_base!(
image::AbstractIntensityImage,
model::EHTModels.AbstractModel;
ex=SequentialEx()
)
# get the number of pixels
nx, ny, np, nf, nt = size(image)
# get uv grid
ug, vg = get_uvgrid(image, false)
# compute the convolution kernel in the visibility domain
vkernel = Matrix{ComplexF64}(undef, length(ug), length(vg))
@floop ex for uidx = 1:nx, vidx = 1:ny
@inbounds vkernel[uidx, vidx] = conj(visibility_point(model, ug[uidx], vg[vidx]))
end
# create fft plan
fp = plan_fft(image.data[:, :, 1, 1, 1])
ifp = plan_ifft(complex(image.data[:, :, 1, 1, 1]))
# execute FFT-based convolution with the kernel
@floop ex for sidx = 1:np, fidx = 1:nf, tidx = 1:nt
@inbounds imarr = image.data[:, :, sidx, fidx, tidx]
@inbounds vim = fp * imarr
@inbounds vim .*= vkernel
@inbounds image.data[:, :, sidx, fidx, tidx] = real(ifp * vim)
end
end
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 4194 | export default_metadata
export copy_metadata!
"""
intensityimage_metadata_default::NamedTuple
A tuple for the default metadata keys and values for `AbstractIntensityImage`.
"""
const intensityimage_metadata_default = (
format="EHTJulia Intensity Image NetCDF4 Format",
version=v"0.1.0",
source="Nameless Source",
instrument="Nameless Instrument",
observer="Nameless Observer",
coordsys="icrs",
equinox=-1,
nx=1,
dx=1.0,
xref=0.0,
ixref=0.5,
xunit="rad",
ny=1,
dy=1.0,
yref=0.0,
iyref=0.5,
yunit="rad",
np=1,
polrep="stokes",
nf=1,
funit="Hz",
nt=1,
tunit="MJD",
fluxunit="Jy/Pixel",
pulsetype="delta",
)
int(x) = round(Int64, x)
"""
intensityimage_metadata_type::NamedTuple
A tuple of types for metadata keys in `intensityimage_metadata_default`.
"""
const intensityimage_metadata_type = (
format=string,
version=VersionNumber,
source=string,
instrument=string,
observer=string,
coordsys=string,
equinox=float,
nx=int,
dx=float,
xref=float,
ixref=float,
xunit=string,
ny=int,
dy=float,
yref=float,
iyref=float,
yunit=string,
np=int,
polrep=string,
nf=int,
funit=string,
nt=int,
tunit=string,
fluxunit=string,
pulsetype=string,
)
"""
intensityimage_metadata_compat::NamedTuple
A tuple of available values for some of the keys in `intensityimage_metadata_default`.
"""
const intensityimage_metadata_compat = (
coordsys=["icrs"],
equinox=[-1],
xunit=["rad"],
yunit=["rad"],
np=[1, 4],
polrep=["stokes"],
funit=["Hz"],
tunit=["MJD"],
fluxunit=["Jy/Pixel"],
pulsetype=["delta", "rectangle"],
)
"""
default_metadata(dataset) -> OrderedDict
Return the default metadata of the given dataset.
"""
@inline function default_metadata(::Type{<:AbstractIntensityImage})
dict = OrderedDict{Symbol,Any}()
for key in keys(intensityimage_metadata_default)
dict[key] = intensityimage_metadata_default[key]
end
return dict
end
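# Usage sketch (illustration only): customize the defaults before creating an image.
#
#     meta = default_metadata(AbstractIntensityImage)
#     meta[:source] = "M87"
#     meta[:nx] = meta[:ny] = 128  # note: the pixel sizes dx/dy stay in rad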
@inline function get_xygrid(
metadata,
angunit::Union{Unitful.Quantity,Unitful.Units,String}=rad)
# Get scaling for the flux unit
if angunit == rad
aunitconv = 1
else
# get unit
if angunit isa String
aunit = get_unit(angunit)
else
aunit = angunit
end
# get scaling factor
aunitconv = unitconv(rad, aunit)
end
nx = metadata[:nx]
ny = metadata[:ny]
dx = metadata[:dx] * aunitconv
dy = metadata[:dy] * aunitconv
ixref = metadata[:ixref]
iyref = metadata[:iyref]
xg = -dx * ((1-ixref):1:(nx-ixref))
yg = dy * ((1-iyref):1:(ny-iyref))
return (xg, yg)
end
@inline function get_uvgrid(metadata, dofftshift::Bool=true)
# nx, ny
nx = metadata[:nx]
ny = metadata[:ny]
# dx, dy
dxrad = metadata[:dx]
dyrad = metadata[:dy]
ug = fftfreq(nx, -1 / dxrad)
vg = fftfreq(ny, 1 / dyrad)
if dofftshift
ug = fftshift(ug)
vg = fftshift(vg)
end
return (ug, vg)
end
@inline function get_fov(
metadata,
angunit::Union{Unitful.Quantity,Unitful.Units,String}=rad)
# Get scaling for the flux unit
if angunit == rad
aunitconv = 1
else
# get unit
if angunit isa String
aunit = get_unit(angunit)
else
aunit = angunit
end
# get scaling factor
aunitconv = unitconv(rad, aunit)
end
nx = metadata[:nx]
ny = metadata[:ny]
dx = metadata[:dx] * aunitconv
dy = metadata[:dy] * aunitconv
return (nx * dx, ny * dy)
end
"""
copy_metadata!(image::AbstractIntensityImage, uvdataset::AbstractUVDataSet)
Copy metadata from the given uvdataset.
"""
@inline function copy_metadata!(image::AbstractIntensityImage, uvdataset::EHTUVData.AbstractUVDataSet)
for key in [:source, :instrument, :observer, :coordsys, :equinox]
image.metadata[key] = uvdataset.metadata[key]
end
image.metadata[:xref] = uvdataset.metadata[:ra]
image.metadata[:yref] = uvdataset.metadata[:dec]
end
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 1098 | export add!
function Base.map!(
image::AbstractIntensityImage,
model::EHTModels.AbstractModel;
ex=SequentialEx()
)
# get the number of pixels
nx, ny, np, nf, nt = size(image)
# get xy grid
xg, yg = get_xygrid(image)
# grid size
dxdy = image.metadata[:dx] * image.metadata[:dy]
# map the model intensity onto the image grid
@floop ex for xidx = 1:nx, yidx = 1:ny
@inbounds image.data[xidx, yidx, :, :, :] .= intensity_point(model, xg[xidx], yg[yidx]) * dxdy
end
end
function add!(
image::AbstractIntensityImage,
model::EHTModels.AbstractModel;
ex=SequentialEx()
)
# get the number of pixels
nx, ny, np, nf, nt = size(image)
# get xy grid
xg, yg = get_xygrid(image)
# grid size
dxdy = image.metadata[:dx] * image.metadata[:dy]
# accumulate the model intensity onto the image grid
@floop ex for xidx = 1:nx, yidx = 1:ny
@inbounds image.data[xidx, yidx, :, :, :] .+= intensity_point(model, xg[xidx], yg[yidx]) * dxdy
end
end
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 2110 | export get_bconv
"""
$(FUNCTIONNAME)(image::AbstractIntensityImage;
fluxunit::Union{Unitful.Quantity,Unitful.Units,String}=Jy,
saunit::Union{Unitful.Quantity,Unitful.Units,String}="pixel")
Get a conversion factor from Jy/pixel (used in AbstractIntensityImage.data)
to an arbitrary unit for the intensity. fluxunit is the unit of
the flux density (e.g. Jy, mJy, μJy) or brightness temperature (e.g. K),
while saunit is the unit of the solid angle (e.g. pixel, beam, mas^2, μas^2).
"""
function get_bconv(
image::AbstractIntensityImage;
fluxunit::Union{Unitful.Quantity,Unitful.Units,String}=Jy,
saunit::Union{Unitful.Quantity,Unitful.Units,String}="pixel")
# Get scaling for the flux unit
if fluxunit isa String
funit = get_unit(fluxunit)
else
funit = fluxunit
end
if dimension(funit) == dimension(K)
# pixel size in radian
dx = image.metadata[:dx]
dy = image.metadata[:dy]
# frequency in Hz
nu = image.f # frequency in Hz
# conversion factor from Jy to K
Jy2K = c^2 / (2 * kB) / dx / dy * 1e-26 ./ nu .^ 2
return Jy2K * unitconv(K, funit)
end
fluxconv = unitconv(Jy, funit)
# Get scaling for the solid angles
if saunit isa String
saunit_low = lowercase(saunit)
if startswith(saunit_low, 'p')
saconv = 1
elseif startswith(saunit_low, 'b')
# pixel size in radian
dx = image.metadata[:dx]
dy = image.metadata[:dy]
# beam size in radian
bmaj = image.metadata[:beam_maj]
bmin = image.metadata[:beam_min]
pixelsa = dx * dy
beamsa = bmaj * bmin * pi / (4 * log(2))
saconv = pixelsa / beamsa
else
saconv = unitconv(rad^2, get_unit(saunit))
end
elseif saunit isa Union{Unitful.Quantity,Unitful.Units}
saconv = unitconv(rad^2, saunit)
else
@throwerror ArgumentError "saunit must be 'pixel', 'beam', or units for solid angles"
end
return fluxconv / saconv
end
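# Usage sketch (illustration only):
#
#     im.data .* get_bconv(im, fluxunit="mJy", saunit="beam")  # to mJy/beam
#     # (the "beam" option assumes :beam_maj/:beam_min entries in the metadata)
#     tb = im.data[:, :, 1, 1, 1] .* get_bconv(im, fluxunit=K)[1]  # to Kelvin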
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 6675 | export get_imextent
export plot_colorbar
export plot_xylabel
export imshow
"""
$(FUNCTIONNAME)(::AbstractIntensityImage; kwargs..., imshowkwargs...)
Plot an image using PythonPlot. `imshowkwargs` are passed to PythonPlot.imshow.
Returns a dictionary that contains all python objects generated in the plot.
# Keyword Arguments
- `angunit`: angular unit for the axes. If nothing, the unit in the image
metadata is used. If a Unitful unit, it can be any angular unit.
- `fluxunit`: unit for the intensity, either a flux density (e.g. Jy) or a brightness temperature (e.g. K).
- `saunit`: unit for the solid angle.
- `idx`: index of the image to plot. (polarization, frequency, time)
- `cmap`: colormap to use.
- `scale`: scaling of the image. Can be :log, :gamma or :linear.
- `gamma`: gamma value for the :gamma scaling.
- `dyrange`: dynamic range for the :log scaling.
- `vmax`: maximum value for the :linear and :gamma scaling.
- `vmin`: minimum value for the :linear and :gamma scaling.
- `relative`: if true, the vmin and vmax are relative to the maximum value.
- `axisoff`: if true, turn off the axis.
- `axislabel`: if true, plot the axis labels.
- `add_colorbar`: if true, add a colorbar.
- `interpolation`: interpolation method for the image.
"""
function imshow(
image::AbstractIntensityImage;
angunit::Union{String,Unitful.Units,Unitful.Quantity}=rad,
fluxunit::Union{String,Unitful.Units,Unitful.Quantity}=K,
saunit::Union{String,Unitful.Units,Unitful.Quantity}="pixel",
idx=[1, 1, 1],
cmap="afmhot",
scale::Symbol=:linear,
gamma::Number=0.5,
dyrange::Number=1000,
vmax=nothing,
vmin=nothing,
relative=false,
axisoff=false,
axislabel=true,
add_colorbar=true,
interpolation="bilinear",
imshowargs...)
# get angular unit
dopixel::Bool = false
if isnothing(angunit)
aunit = get_unit(image.metadata[:xunit]) # angular unit recorded in the metadata
elseif angunit isa String
if startswith(lowercase(angunit), "pixel")
aunit = "pixel"
dopixel = true
else
aunit = get_unit(angunit)
end
else
aunit = angunit
end
# get imextent
imextent = get_imextent(image, angunit)
# get flux unit
if fluxunit isa String
funit = get_unit(fluxunit)
else
funit = fluxunit
end
# Convert the intensity unit
bconv = get_bconv(image, fluxunit=funit, saunit=saunit)
if dimension(funit) == dimension(K)
bconv = bconv[idx[2]]
end
imarr = image.data[:, :, idx...] * bconv
if vmax isa Nothing
nmax = maximum(imarr)
else
nmax = vmax
end
if scale == :log
matplotlib = pyimport("matplotlib")
nmin = nmax / dyrange
norm = matplotlib.colors.LogNorm(vmin=nmin, vmax=nmax)
imarr[imarr.<nmax/dyrange] .= nmin
nmin = nothing
nmax = nothing
elseif scale == :gamma
matplotlib = pyimport("matplotlib")
if vmin isa Nothing
nmin = 0
elseif relative
nmin = vmin * nmax
else
nmin = vmin
end
imarr[imarr.<0] .= 0
norm = matplotlib.colors.PowerNorm(vmin=nmin, vmax=nmax, gamma=gamma)
nmin = nothing
nmax = nothing
elseif scale == :linear
if vmin isa Nothing
nmin = minimum([minimum(imarr), 0])
elseif relative
nmin = vmin * nmax
else
nmin = vmin
end
norm = nothing
else
@throwerror ArgumentError "scale must be :log, :gamma or :linear"
end
imsobj = PythonPlot.imshow(
transpose(imarr),
origin="lower", extent=imextent,
vmin=nmin, vmax=nmax, norm=norm,
cmap=cmap, interpolation=interpolation,
imshowargs...)
outdict = Dict()
outdict["imshowobj"] = imsobj
if axisoff
PythonPlot.axis("off")
elseif axislabel
output = plot_xylabel(aunit)
outdict["xlabelobj"] = output[1]
outdict["ylabelobj"] = output[2]
end
if add_colorbar
outdict["colorbarobj"] = plot_colorbar(funit, saunit)
end
return outdict
end
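# Usage sketch (illustration only; requires a working matplotlib backend):
#
#     out = imshow(im; angunit="μas", fluxunit="Jy", saunit="pixel",
#                  scale=:log, dyrange=1000)
#     out["imshowobj"]  # handle to the underlying matplotlib image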
function get_imextent(
image::AbstractIntensityImage,
angunit::Union{String,Unitful.Units,Unitful.Quantity,Nothing}=nothing)
# check angular units
dopixel = false
if isnothing(angunit)
aunit = get_unit(image.metadata[:xunit]) # angular unit recorded in the metadata
elseif angunit isa String
dopixel = startswith(lowercase(angunit), "pix")
if dopixel == false
aunit = get_unit(angunit)
end
else
aunit = angunit
end
if dopixel == false
angconv = unitconv(u"rad", aunit)
nx, ny = size(image.data)[1:2]
dx = image.metadata[:dx]
dy = image.metadata[:dy]
ixref = image.metadata[:ixref]
iyref = image.metadata[:iyref]
xmax = -dx * (1 - ixref - 0.5)
xmin = -dx * (nx - ixref + 0.5)
ymax = dy * (ny - iyref + 0.5)
ymin = dy * (1 - iyref - 0.5)
return [xmax, xmin, ymin, ymax] * angconv
else
nx, ny = size(image.data)[1:2]
ixref = image.metadata[:ixref]
iyref = image.metadata[:iyref]
xmax = -1 * (1 - ixref - 0.5)
xmin = -1 * (nx - ixref + 0.5)
ymax = (ny - iyref + 0.5)
ymin = (1 - iyref - 0.5)
return [xmax, xmin, ymin, ymax]
end
end
function plot_xylabel(
angunit::Union{Unitful.Units,Unitful.Quantity,String};
labelargs...)
# get the label for angular units
if angunit isa String
if startswith(lowercase(angunit), "pix")
unitlabel = "pixel"
else
unitlabel = string(get_unit(angunit))
end
else
unitlabel = string(angunit)
end
# make the labels and plot them
xaxis_label = format("Relative RA ({})", unitlabel)
yaxis_label = format("Relative Dec ({})", unitlabel)
xlabobj = PythonPlot.xlabel(xaxis_label, labelargs...)
ylabobj = PythonPlot.ylabel(yaxis_label, labelargs...)
return xlabobj, ylabobj
end
function plot_colorbar(
fluxunit,
saunit;
colorbarargs...)
label = intensity_label(fluxunit, saunit)
cbarobj = PythonPlot.colorbar(label=label, colorbarargs...)
return cbarobj
end
function intensity_label(
funit,
saunit,
)
funitlabel = string(funit)
if dimension(funit) == dimension(K)
saunitlabel = ""
elseif saunit isa String && startswith(lowercase(saunit), 'p')
saunitlabel = "/pixel"
elseif saunit isa String && startswith(lowercase(saunit), 'b')
saunitlabel = "/beam"
else
saunitlabel = "/" * string(saunit) * "^2"
end
intunitlabel = funitlabel * saunitlabel
if dimension(funit) == dimension(K)
label = format("Brightness Temperature ({})", intunitlabel)
else
label = format("Intensity ({})", intunitlabel)
end
end
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 4464 | export save_fits
export save_fits!
"""
save_fits[!](image::AbstractIntensityImage, filename::AbstractString, idx=(1, 1); fitstype::Symbol=:casa)
Save the image into a FITS file in a specified format.
# Arguments
- `image::AbstractIntensityImage`: the input image
- `filename::AbstractString`: the name of the output FITS file
- `idx`: the index of the saved image. Should be (frequency index, time index). Default to `(1,1)`.
# Keywords
- `fitstype::Symbol`: the format type of the output FITS. Availables are `:casa` (CASA compatible).
"""
function save_fits!(image::AbstractIntensityImage, filename::AbstractString, idx=(1, 1); fitstype::Symbol=:casa)
if fitstype == :casa
save_fits_casa!(image, filename, idx)
else
@throwerror ArgumentError "`fitstype` should be `:casa`."
end
end
# quick shortcut
save_fits = save_fits!
# saving imagedata in a CASA compatible format
function save_fits_casa!(image::AbstractIntensityImage, filename::AbstractString, idx=[1, 1])
# size of the image and corresponding coordinates
nx, ny, np, _ = size(image)
fidx, tidx = idx
# Image Metadata
metadata = image.metadata
# quick shortcuts
obsra = rad2deg(metadata[:xref])
obsdec = rad2deg(metadata[:yref])
reffreq = image.f[fidx]
mjd = image.t[tidx]
# Open FITS file in the write mode (allowing to overwrite)
f = FITS(filename, "w")
# Initialize headers
header = FITSHeader(["COMMENT"], [NaN], ["This FITS file was created in EHTImages.jl."])
# Set headers
# a quick shortcut
function set!(header::FITSHeader, keyname::AbstractString, value, comment::AbstractString="")
header[keyname] = value
set_comment!(header, keyname, comment)
end
# Object Name
set!(header, "OBJECT", metadata[:source], "The name of the object")
# RA Axis
set!(header, "CTYPE1", "RA---SIN",
"data axis 1: Right Ascenction (RA)"
)
set!(header, "CRVAL1", obsra,
"RA coordinate at the reference pixel")
set!(header, "CDELT1", -rad2deg(metadata[:dx]),
"pixel size of the RA axis")
set!(header, "CRPIX1", metadata[:ixref],
"refrence pixel of the RA axis")
set!(header, "CUNIT1", "DEG",
"unit of CRVAL1 and CDELT1")
# Dec Axis
set!(header, "CTYPE2", "DEC--SIN",
"data axis 2: Declination (DEC)"
)
set!(header, "CRVAL2", obsdec,
"DEC coordinate at the reference pixel")
set!(header, "CDELT2", rad2deg(metadata[:dy]),
"pixel size of the DEC axis")
set!(header, "CRPIX2", metadata[:iyref],
"refrence pixel of the DEC axis")
set!(header, "CUNIT2", "DEG",
"unit of CRVAL2 and CDELT2")
# Frequency Axis
set!(header, "CTYPE3", "FREQ",
"data axis 3: frequency")
set!(header, "CRVAL3", reffreq,
"reference frequency")
set!(header, "CDELT3", 1,
"bandwidth of the frequency channel")
set!(header, "CRPIX3", 1,
"channel of the reference frequency")
set!(header, "CUNIT3", "HZ",
"unit of CRVAL3 and CDELT3")
# Stokes Axis
set!(header, "CTYPE4", "STOKES",
"data axis 4: stokes parameters"
)
set!(header, "CRVAL4", 1, "")
set!(header, "CDELT4", 1)
set!(header, "CRPIX4", 1)
set!(header, "CUNIT4", "", "Dimensionless")
# OBS RA and DEC
set!(header, "OBSRA", obsra, "Reference RA Coordinates in degree")
set!(header, "OBSDEC", obsdec, "Reference Dec Coordinates in degree")
set!(header, "FREQ", image.f[fidx], "Reference Frequency in Hz")
# OBS DATE
set!(header, "OBSDATE", Dates.format(mjd2datetime(mjd), "yyyy-mm-dd"),
"Observation Date")
set!(header, "MJD", mjd, "Modified Julian Date")
# Instruments
set!(header, "OBSERVER", metadata[:observer], "Name of the observer")
set!(header, "TELESCOP", metadata[:instrument], "Name of the observing instrument")
# Unit of the brightness
set!(header, "BUNIT", "JY/PIXEL", "Unif of the intensity")
# Equinox
set!(header, "RADESYS", uppercase(metadata[:coordsys]), "Coordinate System")
set!(header, "EQUINOX", metadata[:equinox], "Equinox")
# Pulse type
set!(header, "PULSETYPE", uppercase(metadata[:pulsetype]), "Type of the pulse function")
# Write the image with the header.
write(f, permutedims(reshape(getindex(image.data, :, :, :, idx...), nx, ny, 1, np), (1, 2, 3, 4)), header=header)
close(f)
end
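# Usage sketch (illustration only):
#
#     save_fits!(im, "m87.fits")             # first frequency/time frame, idx=(1, 1)
#     save_fits!(im, "m87_f2.fits", (2, 1))  # second frequency channel instead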
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 6853 | export imagepixels
export intensitymap
export intensitymap2d
export intensitymap4d
export stokesintensitymap
export stokesintensitymap2d
export stokesintensitymap4d
export add!, mul!
"""
imagepixels(metadata) --> grid <: VLBISkyModels.AbstractDims
imagepixels(::AbstractIntensityImage) --> grid <: VLBISkyModels.AbstractDims
Create the grid instance for Comrade.
"""
@inline function imagepixels(metadata)
fovx, fovy = get_fov(metadata)
nx = metadata[:nx]
ny = metadata[:ny]
dx = metadata[:dx]
dy = metadata[:dy]
ixref = metadata[:ixref]
iyref = metadata[:iyref]
x0 = -dx * (ixref - (nx + 1) / 2)
y0 = dy * (iyref - (ny + 1) / 2)
return VLBISkyModels.imagepixels(fovx, fovy, nx, ny, x0, y0)
end
@inline imagepixels(im::AbstractIntensityImage) = imagepixels(im.metadata)
"""
$(FUNCTIONNAME)(im::AbstractIntensityImage, pidx=1, fidx=1, tidx=1) --> VLBISkyModels.IntensityMap
Create a two-dimensional VLBISkyModels.IntensityMap model.
# Arguments
- `pidx, fidx, tidx::Integer`: indices for polarization, frequency and time, respectively.
"""
@inline function intensitymap2d(im::AbstractIntensityImage, pidx=1, fidx=1, tidx=1)
xg, yg = get_xygrid(im)
nx = im.metadata[:nx]
ny = im.metadata[:ny]
xitr = range(xg[end], xg[1], length=nx)
yitr = range(yg[1], yg[end], length=ny)
metadata = intensitymap_header2d(im, pidx, fidx, tidx)
dims = GriddedKeys{(:X, :Y)}((xitr, yitr), metadata)
return IntensityMap(im[end:-1:1, :, pidx, fidx, tidx], dims)
end
"""
$(FUNCTIONNAME)(im::AbstractIntensityImage, pidx=1) --> VLBISkyModels.IntensityMap
Create a four-dimensional VLBISkyModels.IntensityMap model.
# Arguments
- `pidx::Integer`: the polarization index.
"""
@inline function intensitymap4d(im::AbstractIntensityImage, pidx=1)
xg, yg = get_xygrid(im)
nx = im.metadata[:nx]
ny = im.metadata[:ny]
xitr = range(xg[end], xg[1], length=nx)
yitr = range(yg[1], yg[end], length=ny)
metadata = intensitymap_header4d(im, pidx)
dims = GriddedKeys{(:X, :Y, :F, :T)}((xitr, yitr, im.f, im.t), metadata)
return IntensityMap(im[end:-1:1, :, pidx, :, :], dims)
end
@inline function intensitymap_header2d(im::AbstractIntensityImage, pidx, fidx, tidx)
return (
source=im.metadata[:source],
RA=im.metadata[:xref],
DEC=im.metadata[:yref],
mjd=im.t[tidx],
F=im.f[fidx],
stokes=pidx
)
end
@inline function intensitymap_header4d(im::AbstractIntensityImage, pidx)
return (
source=im.metadata[:source],
RA=im.metadata[:xref],
DEC=im.metadata[:yref],
mjd=im.t[1],
F=im.f[1],
stokes=pidx
)
end
"""
stokesintensitymap2d(im::AbstractIntensityImage, fidx=1, tidx=1) --> VLBISkyModels.StokesIntensityMap
Create a 2D VLBISkyModels.StokesIntensityMap model.
# Arguments
- `fidx, tidx::Integer`: indices for frequency and time, respectively.
"""
@inline function stokesintensitymap2d(im::AbstractIntensityImage, fidx=1, tidx=1)
@assert im.metadata[:np] == 4
imap = intensitymap2d(im, 1, fidx, tidx)
qmap = intensitymap2d(im, 2, fidx, tidx)
umap = intensitymap2d(im, 3, fidx, tidx)
vmap = intensitymap2d(im, 4, fidx, tidx)
return VLBISkyModels.StokesIntensityMap(imap, qmap, umap, vmap)
end
"""
stokesintensitymap4d(im::AbstractIntensityImage) --> VLBISkyModels.StokesIntensityMap
Create a 4D VLBISkyModels.StokesIntensityMap model.
"""
@inline function stokesintensitymap4d(im::AbstractIntensityImage)
@assert im.metadata[:np] == 4
imap = intensitymap4d(im, 1)
qmap = intensitymap4d(im, 2)
umap = intensitymap4d(im, 3)
vmap = intensitymap4d(im, 4)
return VLBISkyModels.StokesIntensityMap(imap, qmap, umap, vmap)
end
# load intensity map into the existing AbstractIntensityImage
@inline function Base.map!(im::AbstractIntensityImage, imap::VLBISkyModels.IntensityMap, pidx=1, fidx=1, tidx=1)
nimapdim = ndims(imap)
@assert nimapdim == 2 || nimapdim == 4
if nimapdim == 2
@assert size(imap) == (im.metadata[:nx], im.metadata[:ny])
im.data[:, :, pidx, fidx, tidx] .= imap.data.data[end:-1:1, 1:end]
else
@assert size(imap) == (im.metadata[:nx], im.metadata[:ny], im.metadata[:nf], im.metadata[:nt])
im.data[:, :, pidx, :, :] .= imap.data.data[end:-1:1, 1:end, :, :]
end
return nothing
end
@inline function add!(im::AbstractIntensityImage, imap::VLBISkyModels.IntensityMap, pidx=1, fidx=1, tidx=1)
nimapdim = ndims(imap)
@assert nimapdim == 2 || nimapdim == 4
if nimapdim == 2
@assert size(imap) == (im.metadata[:nx], im.metadata[:ny])
im.data[:, :, pidx, fidx, tidx] .+= imap.data.data[end:-1:1, 1:end]
else
@assert size(imap) == (im.metadata[:nx], im.metadata[:ny], im.metadata[:nf], im.metadata[:nt])
im.data[:, :, pidx, :, :] .+= imap.data.data[end:-1:1, 1:end, :, :]
end
return nothing
end
@inline function mul!(im::AbstractIntensityImage, imap::VLBISkyModels.IntensityMap, pidx=1, fidx=1, tidx=1)
nimapdim = ndims(imap)
@assert nimapdim == 2 || nimapdim == 4
if nimapdim == 2
@assert size(imap) == (im.metadata[:nx], im.metadata[:ny])
im.data[:, :, pidx, fidx, tidx] .*= imap.data.data[end:-1:1, 1:end]
else
@assert size(imap) == (im.metadata[:nx], im.metadata[:ny], im.metadata[:nf], im.metadata[:nt])
im.data[:, :, pidx, :, :] .*= imap.data.data[end:-1:1, 1:end, :, :]
end
return nothing
end
# load an abstract model into the existing AbstractIntensityImage
@inline function Base.map!(im::AbstractIntensityImage, model::ComradeBase.AbstractModel, pidx=1, fidx=1, tidx=1)
imap = intensitymap2d(im, pidx, fidx, tidx)
intensitymap!(imap, model)
im.data[:, :, pidx, fidx, tidx] = imap.data.data[end:-1:1, 1:end]
return nothing
end
@inline function add!(im::AbstractIntensityImage, model::ComradeBase.AbstractModel, pidx=1, fidx=1, tidx=1)
imap = intensitymap2d(im, pidx, fidx, tidx)
intensitymap!(imap, model)
im.data[:, :, pidx, fidx, tidx] .+= imap.data.data[end:-1:1, 1:end]
return nothing
end
@inline function mul!(im::AbstractIntensityImage, model::ComradeBase.AbstractModel, pidx=1, fidx=1, tidx=1)
imap = intensitymap2d(im, pidx, fidx, tidx)
intensitymap!(imap, model)
im.data[:, :, pidx, fidx, tidx] .*= imap.data.data[end:-1:1, 1:end]
return nothing
end
# defaults
intensitymap(im::AbstractIntensityImage, args...) = intensitymap2d(im, args...)
stokesintensitymap(im::AbstractIntensityImage, args...) = stokesintensitymap2d(im, args...)
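# Usage sketch (illustration only; `model` is any ComradeBase.AbstractModel):
#
#     imap = intensitymap(im)  # 2D VLBISkyModels.IntensityMap of frame (1, 1, 1)
#     map!(im, imap)           # write an IntensityMap back into the image
#     add!(im, model)          # accumulate a model onto the image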
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 977 | function convolve(
image::DiskIntensityImage,
filename::AbstractString,
model::AbstractModel;
mode::Symbol=:create,
group::AbstractString=ncd_image_defaultgroup,
ex=SequentialEx()
)
# create a new file
newim = save_netcdf(image, filename, mode=mode, group=group)
# run convolution
convolve!(newim, model, ex=ex)
return newim
end
function convolve!(
image::DiskIntensityImage,
model::AbstractModel;
ex=SequentialEx()
)
# Only SequentialEx is supported for DiskIntensityImage (NetCDF4 writing is single-threaded)
if (ex isa SequentialEx) == false
@throwerror ArgumentError "NetCDF4 only supports single thread writing. Please use SequentialEx."
end
# reopen in :append mode if not writable
flag = iswritable(image) == false
if flag
open!(image, :append)
end
convolve_base!(image, model, ex=ex)
# reopen in :read mode if reopened.
if flag
open!(image, :read)
end
return nothing
end
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 1763 | export DiskIntensityImage
"""
$(TYPEDEF)
A data type for five dimensional images whose data are all stored on disk using
the NetCDF4 format. This format relies on `NCDatasets` to provide easy access to data
through many useful methods in the `NCDatasets` package.
Note that this data type could be either mutable or immutable depending on the access mode
to the NetCDF4 file.
$(TYPEDFIELDS)
"""
@with_kw mutable struct DiskIntensityImage <: AbstractIntensityImage
"name of the corresponding NetCDF4 file"
filename = nothing
"group name of the corresponding image data set"
group = nothing
"five dimensional intensity disbrituion."
data = nothing
"metadata."
metadata = nothing
"polarization code, giving the parization axis (`:p`)."
t = nothing
"central frequency in Hz, giving the frequency axis (`:f`)."
f = nothing
"central modified Julian date, giving the time axis (`:t`)."
p = nothing
dataset = nothing
end
# DiskIntensityImage is a disk-based image data
isdiskdata(::DiskIntensityImage) = IsDiskData()
# This is a function to check if the image is opened.
function Base.isopen(image::DiskIntensityImage)::Bool
if isnothing(image.dataset)
return false
end
if image.dataset.ncid < 0
return false
else
return true
end
end
# This is a function to check if the image is writable.
function Base.iswritable(image::DiskIntensityImage)::Bool
if isopen(image)
return image.dataset.iswritable
else
return false
end
end
# raise error for copy
function Base.copy(::DiskIntensityImage)
@throwerror ArgumentError "Since DiskIntensityImage is disk-based, copy is not available. Use save_netcdf instead."
end
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 2191 | """
ncd_intensity_defaultgroup::String
Default group name of the EHT Intensity Image NetCDF4 format.
"""
const ncd_intensity_defaultgroup = "intensityimage"
"""
ncd_intensity_dimnames::NamedTuple
A named tuple relating Symbols to actual strings for the dimension of the EHT Intensity Image NetCDF4 format.
keys are `:x, :y` for x and y axis, `:p` for polarization, `:f` for frequency, `:t` for time.
"""
const ncd_intensity_dimnames = (
x="x",
y="y",
p="polarization",
f="frequency",
t="time",
)
"""
ncd_intensity_varnames::NamedTuple
A named tuple relating Symbols to names of the corresponding variables
of the EHT Intensity Image NetCDF4 format.
"""
const ncd_intensity_varnames = (
data="intensity",
x="x",
y="y",
p="polarization",
f="frequency",
t="time",
)
"""
ncd_intensity_vartypes::NamedTuple
A named tuple relating Symbols to types of the corresponding variables
of the EHT Intensity Image NetCDF4 format.
"""
const ncd_intensity_vartypes = (
data=Float64,
x=Float64,
y=Float64,
p=String,
f=Float64,
t=Float64,
)
"""
ncd_intensity_vardims::NamedTuple
A named tuple relating Symbols to dimensions of the corresponding variables
of the EHT Intensity Image NetCDF4 format.
"""
const ncd_intensity_vardims = (
data=tuple(ncd_intensity_dimnames...,),
x=(ncd_intensity_dimnames[:x],),
y=(ncd_intensity_dimnames[:y],),
p=(ncd_intensity_dimnames[:p],),
f=(ncd_intensity_dimnames[:f],),
t=(ncd_intensity_dimnames[:t],),
)
"""
ncd_intensity_metadata_typeconv::NamedTuple
A named tuple relating metadata keys to type-conversion functions applied when
metadata are written to the EHT Intensity Image NetCDF4 format.
"""
const ncd_intensity_metadata_typeconv = (
format=string,
version=string,
source=string,
instrument=string,
observer=string,
coordsys=string,
equinox=float,
nx=int,
dx=float,
xref=float,
ixref=float,
xunit=string,
ny=int,
dy=float,
yref=float,
iyref=float,
yunit=string,
np=int,
polrep=string,
nf=int,
funit=string,
nt=int,
tunit=string,
fluxunit=string,
pulsetype=string,
)
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 2684 | export close, close!
export load_image
export open!
"""
open!(image[, mode])
Load image data from NCDataset specified in the input image object
with the given access mode. If image data are already opened,
it will close it and reload data again.
# Arguments
- `image::DiskIntensityImage`:
The input image object.
- `mode::Symbol=:read`:
The access mode to NCDataset.
Available modes are `:read`, `:append`, `:create`.
See help for `EHTNCDBase.ncdmodes` for details.
"""
function open!(image::DiskIntensityImage, mode::Symbol=:read)
# get mode string
modestr = ncdmodes[mode]
# check if the file is already opened
if isopen(image)
@debug "The input image is already loaded. To be reloaded again."
close!(image)
end
# open NetCDF file
image.dataset = NCDataset(image.filename, modestr)
# get the group
groups = split_group(image.group)
imds = get_group(image.dataset, groups)
# load arrays
image.data = imds[ncd_intensity_varnames[:data]].var
image.metadata = imds.attrib
image.t = imds[ncd_intensity_varnames[:t]].var
image.f = imds[ncd_intensity_varnames[:f]].var
image.p = imds[ncd_intensity_varnames[:p]].var
return nothing
end
"""
load_image(filename; [group, mode]) -> DiskIntensityImage
Load image data from the specified group in the given NetCDF4 file
with the specified access mode.
# Arguments
- `filename::AbstractString`:
The input NetCDF4 file.
- `group::AbstractString=EHTImages.ncd_intensity_defaultgroup`
The group of the image data in the input NetCDF4 file.
- `mode::Symbol=:read`:
The access mode to NCDataset.
Available modes are `:read` and `:append`.
See help for `EHTNCDBase.ncdmodes` for details.
"""
function load_image(
filename::AbstractString;
group::AbstractString=ncd_intensity_defaultgroup,
mode::Symbol=:read
)::DiskIntensityImage
# check modes
if mode ∉ [:read, :append]
@throwerror ArgumentError "`mode` should be `:read` or `:append`."
end
# generate image object
image = DiskIntensityImage(
filename=filename,
group=group,
)
# load image
open!(image, mode)
return image
end
"""
close!(image::DiskIntensityImage)
Close the access to the associated NetCDF4 file.
"""
function close!(image::DiskIntensityImage)
if isopen(image)
Base.close(image.dataset)
end
return nothing
end
"""
close(image::DiskIntensityImage)
Close the access to the associated NetCDF4 file.
This function is an alias to close!(image).
"""
function Base.close(image::DiskIntensityImage)
close!(image)
return nothing
end
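# Usage sketch (illustration only):
#
#     im = load_image("image.nc")  # read-only access to the default group
#     open!(im, :append)           # reopen the NetCDF4 file for writing
#     close!(im)                   # release the file handle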
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 9284 | export create_DiskIntensityImage
export save_netcdf, save_netcdf!
"""
$(FUNCTIONNAME)(filename, nx, dx, angunit; keywords) -> DiskIntensityImage
Create a blank `DiskIntensityImage` object. Return `DiskIntensityImage` data loaded with :read mode.
# Arguments
- `filename::AbstractString`:
NetCDF4 file where image data will be created.
- `nx::Integer`:
the number of pixels along the horizontal axis. Must be positive.
- `dx::Real`:
the pixel size of the horizontal axis. Must be positive.
- `angunit::Union{Unitful.Quantity, Unitful.Units or String}=rad`:
the angular unit for `dx` and `dy`.
# Keywords
- `ny::Real=nx`:
the number of pixels along the vertical axis. Must be positive.
- `dy::Real=dx`:
the pixel size of the vertical axis. Must be positive.
- `ixref::Real=(nx + 1) / 2`, `iyref::Real=(ny + 1) / 2`:
indices of the reference pixels along the horizontal and vertical
axes, respectively. Default values are set to the center of the field
of view.
- `pol::Symbol=:single`:
number of polarizations. Available options are `:single` (one) or `:full` (i.e. four)
polarizations.
- `freq::Vector{Float64}=[1.0]`:
a vector for frequencies in the unit of Hz
- `mjd::Vector{Float64}=[0.0]`:
a vector for time in the unit of MJD.
- `metadata::AbstractDict=default_metadata(AbstractIntensityImage)`:
other metadata. Note that the above keywords and arguments will overwrite
the values of the conflicting keys in this `metadata` argument.
- `mode::Symbol=:create`:
The access mode to NCDataset.
Available modes are `:append` and `:create`.
See help for `EHTNCDBase.ncdmodes` for details.
- `group::AbstractString=EHTImages.ncd_intensity_defaultgroup`:
The group of the image data in the input NetCDF4 file.
"""
function diskintensityimage(
filename::AbstractString,
nx::Integer,
dx::Real,
angunit::Union{Unitful.Quantity,Unitful.Units,String};
ixref::Real=(nx + 1) / 2,
ny::Real=nx,
dy::Real=dx,
iyref::Real=(ny + 1) / 2,
pol::Symbol=:single,
freq::Vector{Float64}=[1.0],
mjd::Vector{Float64}=[0.0],
metadata::AbstractDict=default_metadata(AbstractIntensityImage),
mode::Symbol=:create,
group::AbstractString=ncd_intensity_defaultgroup
)::DiskIntensityImage
# check variables
for arg in [nx, dx, ny, dy]
if arg <= 0
@throwerror ArgumentError "`nx`, `ny`, `dx` and `dy` must be positive"
end
end
# get the number of polarization
if pol ∉ [:single, :full]
@throwerror ArgumentError "we only support `:single` or `:full` polarizaiton images"
elseif pol == :single
np = 1 # single polarization
else
np = 4 # full polarization
end
# get the size of mjd and freq
nf = length(freq)
nt = length(mjd)
# get mode string
if mode ∉ [:create, :append]
@throwerror ArgumentError "mode must be :create or :append"
else
modestr = ncdmodes[mode]
end
# create the output NetCDF file
dataset = NCDataset(filename, modestr)
# define the group
groups = split_group(group)
imds = define_group(dataset, groups)
# conversion factor of angular unit
if angunit == rad
aconv = 1
else
aconv = unitconv(angunit, rad)
end
# set metadata
# initialize metadata
attrib = default_metadata(AbstractIntensityImage)
# input metadata in arguments
for key in keys(metadata)
attrib[key] = metadata[key]
end
# input other information from arguments
attrib[:nx] = nx
attrib[:dx] = dx * aconv
attrib[:ixref] = ixref
attrib[:ny] = ny
attrib[:dy] = dy * aconv
attrib[:iyref] = iyref
attrib[:np] = np
attrib[:nf] = nf
attrib[:nt] = nt
# set metadata
set_ncd_intensity_metadata!(imds, attrib)
# define dimensions and variables
define_ncd_intensity_dimensions!(imds, nx, ny, np, nf, nt)
define_ncd_intensity_variables!(imds)
# initialize variables
# image
imds[ncd_intensity_varnames[:data]].var[:] = 0.0
# x and y
xg, yg = get_xygrid(attrib)
imds[ncd_intensity_varnames[:x]].var[:] = xg[:]
imds[ncd_intensity_varnames[:y]].var[:] = yg[:]
# polarization
imds[ncd_intensity_varnames[:p]].var[:] = ["I", "Q", "U", "V"][1:np]
# frequency
imds[ncd_intensity_varnames[:f]].var[:] = freq[:]
# time
imds[ncd_intensity_varnames[:t]].var[:] = mjd[:]
# close data set
NCDatasets.close(dataset)
# open file
image = load_image(filename, group=group, mode=:read)
return image
end
"""
save_netcdf!(image, filename; [mode, group])
Save image data to NetCDF4 format.
# Arguments
- `image::AbstractIntensityImage`
Input image data
- `filename::AbstractString`:
NetCDF4 file where image data will be saved.
- `mode::Symbol=:create`:
The access mode to NCDataset.
Available modes are `:append` and `:create`.
See help for `EHTNCDBase.ncdmodes` for details.
- `group::AbstractString=EHTImages.ncd_intensity_defaultgroup`:
The group of the image data in the input NetCDF4 file.
"""
function save_netcdf!(
image::AbstractIntensityImage,
filename::AbstractString;
mode::Symbol=:create,
group::AbstractString=ncd_intensity_defaultgroup
)
# get mode string
if mode ∉ [:create, :append]
@throwerror ArgumentError "mode must be :create or :append"
else
modestr = ncdmodes[mode]
end
# check if the file is already opened
if isopen(image) == false
open!(image, :read)
end
# create the output NetCDF file
outdataset = NCDataset(filename, modestr)
# define the group
groups = split_group(group)
outsubds = define_group(outdataset, groups)
# get the size of the image
nx, ny, np, nf, nt = size(image)
# define dimensions and variables
define_ncd_intensity_dimensions!(outsubds, nx, ny, np, nf, nt)
define_ncd_intensity_variables!(outsubds)
# set metadata
# initialize metadata
attrib = default_metadata(AbstractIntensityImage)
# fill metadata
for key in keys(image.metadata)
skey = Symbol(key)
attrib[skey] = image.metadata[skey]
end
# write metadata
set_ncd_intensity_metadata!(outsubds, attrib)
# set variables
# image
outsubds[ncd_intensity_varnames[:data]].var[:, :, :, :, :] = image.data[:, :, :, :, :]
# x and y
xg, yg = get_xygrid(image)
outsubds[ncd_intensity_varnames[:x]].var[:] = xg[:]
outsubds[ncd_intensity_varnames[:y]].var[:] = yg[:]
# pol, freq, mjd
outsubds[ncd_intensity_varnames[:p]].var[:] = image.p[:]
outsubds[ncd_intensity_varnames[:f]].var[:] = image.f[:]
outsubds[ncd_intensity_varnames[:t]].var[:] = image.t[:]
# close data set
NCDatasets.close(outdataset)
return nothing
end
"""
save_netcdf(image, filename; [mode=:create, group="image"]) => DiskIntensityImage
Save image data to NetCDF4 format. Saved data will be loaded and returned
with `:read` access mode.
# Arguments
- `image::AbstractIntensityImage`
Input image data
- `filename::AbstractString`:
NetCDF4 file where image data will be saved.
- `mode::Symbol=:create`:
The access mode to NCDataset.
Available modes are `:append` and `:create`.
See help for `EHTNCDBase.ncdmodes` for details.
- `group::AbstractString=EHTImages.ncd_intensity_defaultgroup`:
The group of the image data in the input NetCDF4 file.
"""
function save_netcdf(
image::AbstractIntensityImage,
filename::AbstractString;
mode::Symbol=:create,
group::AbstractString=ncd_intensity_defaultgroup
)::DiskIntensityImage
save_netcdf!(image, filename, mode=mode, group=group)
return load_image(filename, group=group, mode=:read)
end
"""
define_ncd_intensity_dimensions!(ncd[, nx, ny, np, nf, nt])
Define NetCDF4 dimensions based on the given size of the image data.
"""
function define_ncd_intensity_dimensions!(ncd, nx=1, ny=1, np=1, nf=1, nt=1)
# image size
imsize = (nx, ny, np, nf, nt)
# set dimension
for i in 1:5
@debug i, ncd_intensity_dimnames[i], imsize[i]
defDim(ncd, ncd_intensity_dimnames[i], imsize[i])
end
return nothing
end
"""
define_ncd_intensity_variables!(ncd)
Define NetCDF4 variables based on EHT NetCDF4 Image Format.
"""
function define_ncd_intensity_variables!(ncd)
# define variables
for key in keys(ncd_intensity_varnames)
@debug key, ncd_intensity_varnames[key], ncd_intensity_vartypes[key], ncd_intensity_vardims[key]
defVar(ncd, ncd_intensity_varnames[key], ncd_intensity_vartypes[key], ncd_intensity_vardims[key])
end
return nothing
end
"""
set_ncd_intensity_metadata!(ncd)
Set NetCDF4 metadata based on EHT NetCDF4 Image Format.
"""
function set_ncd_intensity_metadata!(ncd, metadata)
# shortcut to the format
tconv = ncd_intensity_metadata_typeconv
tkeys = keys(ncd_intensity_metadata_typeconv)
# update metadata
for key in keys(metadata)
if key in tkeys
ncd.attrib[key] = tconv[key](metadata[key])
else
ncd.attrib[key] = metadata[key]
end
end
return nothing
end
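# Usage sketch (illustration only; `memim` stands for any in-memory image):
#
#     im = diskintensityimage("blank.nc", 128, 1.0e-11, rad)  # new disk-based image
#     im2 = save_netcdf(memim, "copy.nc")  # write any image and reload it from disk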
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 5524 | export IntensityImage
export intensityimage
"""
$(TYPEDEF)
A data type for five dimensional images whose data are all stored in memory.
This format relies on `EHTDimensionalData.DimStack` to provide easy access to data
through many useful methods in `EHTDimensionalData` and its parent `DimensionalData` packages.
Note that this data type is immutable.
$(TYPEDFIELDS)
"""
@with_kw struct IntensityImage <: AbstractIntensityImage
"`DimArray` storing all of `data`, `p`, `f`, `t` and `metadata`."
dimstack::DimStack
"the five dimensional intensity disbrituion. Alias to `dimstack.intensity.data`."
data::Array{Float64,5}
"metadata. Aliast to `dimarray.metadata`."
metadata::OrderedDict{Symbol,Any}
"the polarization code, giving the polarization axis (`:p`) of `dimarray`. Alias to `dimarray.dims[3].val.data`."
p::Vector{String}
"the central frequency in Hz, giving the frequency axis (`:f`) of `dimarray`. Alias to `dimarray.dims[4].val.data`."
f::Vector{Float64}
"the central modified Julian date, giving the time axis (`:t`) of `dimarray`. Alias to `dimarray.dims[5].val.data`."
t::Vector{Float64}
# Constructor from DimArray
function IntensityImage(dimstack::DimStack)
data = dimstack[:intensity].data
metadata = getfield(dimstack, :metadata)
p = dimstack[:polarization].data
f = dimstack[:frequency].data
t = dimstack[:time].data
new(dimstack, data, metadata, p, f, t)
end
end
# It is memory-based, so non-disk-based, always accessible and writable.
@inline isdiskdata(::IntensityImage) = NotDiskData()
Base.isopen(::IntensityImage) = true
Base.iswritable(::IntensityImage) = true
Base.show(io::IO, mine::MIME"text/plain", image::IntensityImage) = show(io, mine, image.dimstack)
Base.copy(im::IntensityImage) = IntensityImage(copy(im.dimstack))
"""
$(FUNCTIONNAME)(nx, dx, angunit; keywords) -> IntensityImage
Create and return a blank `IntensityImage` object.
# Arguments
- `nx::Integer`:
the number of pixels along the horizontal axis. Must be positive.
- `dx::Real`:
the pixel size of the horizontal axis. Must be positive.
- `angunit::Union{Unitful.Quantity, Unitful.Units or String}=rad`:
the angular unit for `dx` and `dy`.
# Keywords
- `ny::Real=nx`:
the number of pixels along the vertical axis. Must be positive.
- `dy::Real=dx`:
the pixel size of the vertical axis. Must be positive.
- `ixref::Real=(nx + 1) / 2`, `iyref::Real=(ny + 1) / 2`:
index of the reference pixels along with the horizontal and vertical
axises, respectively. Default values set to the center of the field
of the view.
- `p::Symbol=:single`:
number of parizations. Availables are `:single` or `:full` (i.e. four)
parizations.
- `f::Vector{Float64}=[1.0]`:
a vector for fuencies in the unit of Hz
- `t::Vector{Float64}=[0.0]`:
a vector for time in the unit of t.
- `metadata::AbstractDict=default_metadata(AbstractIntensityImage)`:
other metadata. Note that the above keywords and arguments will overwrite
the values of the conflicting keys in this `metadata` argument.
"""
function intensityimage(
nx::Integer,
dx::Real,
angunit::Union{Unitful.Quantity,Unitful.Units,String};
ixref::Real=(nx + 1) / 2,
ny::Real=nx,
dy::Real=dx,
iyref::Real=(ny + 1) / 2,
p::Symbol=:single,
f::Vector{Float64}=[1.0],
t::Vector{Float64}=[0.0],
metadata::AbstractDict=default_metadata(AbstractIntensityImage)
)
# check variables
for arg in [nx, dx, ny, dy]
if arg <= 0
@throwerror ArgumentError "`nx`, `ny`, `dx` and `dy` must be positive"
end
end
# get the number of polarizations
if p ∉ [:single, :full]
@throwerror ArgumentError "we only support `:single` or `:full` polarization images"
elseif p == :single
np = 1 # single polarization
else
np = 4 # full polarization
end
# get the size of t and f
nf = length(f)
nt = length(t)
# conversion factor of angular unit
if angunit == rad
aconv = 1
else
aconv = unitconv(angunit, rad)
end
# set metadata
# initialize metadata
attrib = default_metadata(AbstractIntensityImage)
# input metadata in arguments
for key in keys(metadata)
skey = Symbol(key)
attrib[skey] = metadata[key]
end
# input other information from arguments
attrib[:nx] = nx
attrib[:dx] = dx * aconv
attrib[:ixref] = ixref
attrib[:ny] = ny
attrib[:dy] = dy * aconv
attrib[:iyref] = iyref
attrib[:np] = np
attrib[:nf] = nf
attrib[:nt] = nt
# define x, y grids
dimx = Dim{:x}(1:nx)
dimy = Dim{:y}(1:ny)
dimp = Dim{:polarization}(1:np)
dimf = Dim{:frequency}(1:nf)
dimt = Dim{:time}(1:nt)
# create DimStack data
intensity = DimArray(
data=zeros(Float64, (nx, ny, np, nf, nt)),
dims=(dimx, dimy, dimp, dimf, dimt),
name=:intensity,
)
polarization = DimArray(
data=["I", "Q", "U", "V"][1:np],
dims=(dimp,),
name=:polarization
)
frequency = DimArray(
data=f,
dims=(dimf,),
name=:frequency
)
time = DimArray(
data=t,
dims=(dimt,),
name=:time
)
dimstack = DimStack(
(intensity, polarization, frequency, time),
metadata=attrib
)
# create an IntensityImage instance.
return IntensityImage(dimstack)
end
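# Usage sketch (illustration only; all values below are arbitrary):
#
#   im = intensityimage(128, 1.0, "μas"; p=:full, f=[230e9], t=[58000.0])
#   size(im.data) # -> (128, 128, 4, 1, 1)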
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 6043 | export load_fits
"""
load_fits(filename::AbstractString, hduid::Integer=1) -> IntensityImage
load_fits(fits::FITS, hduid::Integer=1) -> IntensityImage
load_fits(hdu::ImageHDU) -> IntensityImage
Load the input FITS image into `IntensityImage` (in-memory image data).
# Arguments
- `filename::AbstractString`: name of the input FITS file
- `hduid::Integer=1`: ID of the HDU to be loaded. Default to the primary HDU.
- `hdu::ImageHDU`: HDU to be loaded.
"""
function load_fits(filename::AbstractString, hduid::Integer=1)::IntensityImage
f = FITS(filename, "r")
hdu = f[hduid]
image = load_fits(hdu)
close(f)
return image
end
# loader from a FITS object
function load_fits(fits::FITS, hduid::Integer=1)::IntensityImage
hdu = fits[hduid]
image = load_fits(hdu)
return image
end
# loader from an ImageHDU object
function load_fits(hdu::ImageHDU)::IntensityImage
# Check the dimension of the input HDU
naxis = ndims(hdu)
if naxis == 2
nx, ny = size(hdu)
nf = 1
np = 1
elseif naxis == 4
nx, ny, nf, np = size(hdu)
else
@throwerror ArgumentError "The input HDU has a non-standard dimension."
end
nt = 1
# Get header
header = read_header(hdu)
header_keys = keys(header)
# Check the axis of the input HDU
if occursin("RA", uppercase(header["CTYPE1"])) == false
@throwerror ArgumentError "Non standard image FITS format: data axis 1 is apparently not RA."
end
if occursin("DEC", uppercase(header["CTYPE2"])) == false
@throwerror ArgumentError "Non standard image FITS format: data axis 2 is apparently not DEC."
end
if naxis == 4
if occursin("FREQ", uppercase(header["CTYPE3"])) == false
@throwerror ArgumentError "Non standard image FITS format: data axis 3 is apparently not FREQ."
end
if occursin("STOKES", uppercase(header["CTYPE4"])) == false
@throwerror ArgumentError "Non standard image FITS format: data axis 4 is apparently not STOKES."
end
end
# load metadata
metadata = default_metadata(AbstractIntensityImage)
if "OBJECT" in header_keys
metadata[:source] = header["OBJECT"]
end
if "TELESCOP" in header_keys
metadata[:instrument] = header["TELESCOP"]
end
if "OBSERVER" in header_keys
metadata[:observer] = header["OBSERVER"]
end
if "RADESYS" in header_keys
metadata[:coordsys] = lowercase(header["RADESYS"])
end
if "EQUINOX" in header_keys
metadata[:equinox] = header["EQUINOX"]
end
if "PULSETYPE" in header_keys
metadata[:pulsetype] = header["PULSETYPE"]
end
# Load Time
mjd = [datetime2mjd(now())]
if "MJD" in header_keys
mjd = [header["MJD"]]
else
date_keys = ("OBSDATE", "DATE-OBS", "DATE")
for key in date_keys
if key in header_keys
try
mjd = [datetime2mjd(DateTime(header[key], "yyyy-mm-dd"))]
break
catch
print("Warning: non-standard value is found for ", key, ".\n")
end
end
end
end
# Load Frequency
if naxis == 2
freq = [1.0]
freq_keys = ("OBSFREQ", "FREQ")
for key in freq_keys
if key in header_keys
freq = [header[key]]
break
end
end
else
fref = header["CRVAL3"]
df = header["CDELT3"]
ifref = header["CRPIX3"]
freq = ((1-ifref)*df+fref):df:((nf-ifref)*df+fref)
end
# Load Polarization
int(x) = floor(Int, x)
stokes2pol = Dict(
"1" => "I",
"2" => "Q",
"3" => "U",
"4" => "V",
)
if naxis == 2
pol = ["I"]
for key in ("STOKES",)
if key in header_keys
pol = [header[key]]
break
end
end
else
sref = int(header["CRVAL4"])
ds = int(header["CDELT4"])
isref = int(header["CRPIX4"])
stokesid = ((1-isref)*ds+sref):ds:((np-isref)*ds+sref)
pol = [stokes2pol[string(i)] for i in stokesid]
end
# Load x and y axis
for key in ("CRVAL1", "OBSRA", "RA")
if key in header_keys
metadata[:xref] = deg2rad(header[key])
break
end
end
for key in ("CRVAL2", "OBSDEC", "DEC")
if key in header_keys
metadata[:yref] = deg2rad(header[key])
break
end
end
if "CDELT1" in header_keys
metadata[:dx] = abs(deg2rad(header["CDELT1"]))
end
if "CDELT2" in header_keys
metadata[:dy] = abs(deg2rad(header["CDELT2"]))
end
if "CRPIX1" in header_keys
metadata[:ixref] = header["CRPIX1"]
else
metadata[:ixref] = (nx + 1) / 2
end
if "CRPIX2" in header_keys
metadata[:iyref] = header["CRPIX2"]
else
metadata[:iyref] = (ny + 1) / 2
end
metadata[:nx] = nx
metadata[:ny] = ny
metadata[:np] = np
metadata[:nf] = nf
metadata[:nt] = nt
dimx = Dim{:x}(1:nx)
dimy = Dim{:y}(1:ny)
dimp = Dim{:p}(1:np)
dimf = Dim{:f}(1:nf)
dimt = Dim{:t}(1:nt)
# Load Image Data
data = read(hdu)
if naxis == 4
data = permutedims(data, (1, 2, 4, 3))
end
data = reshape(data, nx, ny, np, nf, nt)
intensity = DimArray(
data=data,
dims=(dimx, dimy, dimp, dimf, dimt),
name=:intensity,
)
polarization = DimArray(
data=pol,
dims=(dimp,),
name=:polarization
)
frequency = DimArray(
data=freq,
dims=(dimf,),
name=:frequency
)
time = DimArray(
data=mjd,
dims=(dimt,),
name=:time
)
dimstack = DimStack(
(intensity, polarization, frequency, time),
metadata=metadata
)
# create an IntensityImage instance.
return IntensityImage(dimstack)
end
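# Usage sketch (illustration only; "image.fits" is a placeholder file name):
#
#   image = load_fits("image.fits")    # load the primary HDU
#   image = load_fits("image.fits", 2) # load the second HDU
#
#   # or reuse an already opened FITSIO.FITS object:
#   f = FITS("image.fits", "r")
#   image = load_fits(f, 1)
#   close(f)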
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 1405 | export load
"""
load(image::DiskIntensityImage) -> IntensityImage
Load image data from the input disk-based image into memory.
# Arguments
- `image::DiskIntensityImage`: the input disk-based intensity image.
"""
function load(image::DiskIntensityImage)::IntensityImage
# Open the input image in read mode
if isopen(image) == false
open!(image, :read)
end
# Load metadata
metadata = default_metadata(AbstractIntensityImage)
for key in keys(image.metadata)
skey = Symbol(key)
metadata[skey] = image.metadata[key]
end
# get the size of the image
nx, ny, np, nf, nt = size(image)
# define dimensions
dimx = Dim{:x}(1:nx)
dimy = Dim{:y}(1:ny)
dimp = Dim{:p}(1:np)
dimf = Dim{:f}(1:nf)
dimt = Dim{:t}(1:nt)
# create DimArray data
intensity = DimArray(
data=image.data[:, :, :, :, :],
dims=(dimx, dimy, dimp, dimf, dimt),
name=:intensity,
)
polarization = DimArray(
data=image.p[:],
dims=(dimp,),
name=:polarization
)
frequency = DimArray(
data=image.f[:],
dims=(dimf,),
name=:frequency
)
time = DimArray(
data=image.t[:],
dims=(dimt,),
name=:time
)
dimstack = DimStack(
(intensity, polarization, frequency, time),
metadata=metadata
)
# create an `IntensityImage` instance.
return IntensityImage(dimstack)
end
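# Usage sketch (illustration only): `diskimage` stands for an existing,
# readable DiskIntensityImage instance.
#
#   inmemory = load(diskimage) # returns an in-memory IntensityImage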
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 1741 | export load
function load(imap::VLBISkyModels.IntensityMap; p=:single, pidx=1, fidx=1, tidx=1)
# get the number of dimensions
ndim = ndims(imap)
@assert ndim == 2 || ndim == 4
# get the header from the input Intensity Map
# default metadata
metadata_default = (
source="unknown",
RA=0.0,
DEC=0.0,
mjd=1.0,
F=1.0,
stokes=1
)
metadata = OrderedDict()
for key in keys(metadata_default)
metadata[key] = metadata_default[key]
end
# Load metadata
metadata_imap = ComradeBase.header(imap)
if metadata_imap != ComradeBase.NoHeader()
for key in keys(metadata_imap)
metadata[key] = metadata_imap[key]
end
end
# get image grids
if ndim == 2
nx, ny = size(imap)
nf = 1
nt = 1
f = [metadata[:F]]
t = [metadata[:mjd]]
else
nx, ny, nf, nt = size(imap)
f = collect(imap.F)
t = collect(imap.T)
end
dxrad = imap.X.step.hi
dyrad = imap.Y.step.hi
ixref = -imap.X[1] / dxrad + 1
iyref = -imap.Y[1] / dyrad + 1
# create metadata
metadata_im = default_metadata(IntensityImage)
metadata_im[:source] = metadata[:source]
metadata_im[:x] = metadata[:RA]
metadata_im[:y] = metadata[:DEC]
im = intensityimage(nx, dxrad, rad; ny=ny, dy=dyrad, ixref=ixref, iyref=iyref, p=p, metadata=metadata_im)
if p == :single
pidx_im = 1
else
pidx_im = pidx
end
if ndim == 2
im.data[:, :, pidx_im, fidx, tidx] = imap[end:-1:1, :]
im.p[1] = ("I", "Q", "U", "V")[pidx]
else
im.data[:, :, pidx_im, :, :] = imap[end:-1:1, :, :, :]
end
return im
end
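# Usage sketch (illustration only): `imap` stands for an existing
# VLBISkyModels.IntensityMap instance.
#
#   im = load(imap)                  # single-polarization image
#   im = load(imap; p=:full, pidx=2) # store the 2D map as Stokes Q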
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | code | 91 | using EHTImages
using Test
@testset "EHTImages.jl" begin
# Write your tests here.
end
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | docs | 5238 | # EHTImages
[](https://ehtjulia.github.io/EHTImages.jl/dev/)
[](https://github.com/EHTJulia/EHTImages.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/EHTJulia/EHTImages.jl)
This module provides data types and implements basic functions to handle five-dimensional astronomical images for radio interferometry. The module aims to provide the following features, meeting the needs for multi-dimensional high-resolution imaging, particularly for Very Long Baseline Interferometry (e.g., Event Horizon Telescope) and millimeter interferometry (e.g., ALMA) in the regime of narrow field of views.
The package currently implements:
- Provides abstract types and methods to handle both in-memory and disk-based image cubes.
- Offers native support for five-dimensional images (x, y, frequency, polarization, time) in a self-descriptive data format.
- Supports non-equidistant grid in time for the application of dynamic imaging methods (e.g., Johnson et al., 2017, Bouman et al., 2017).
- Supports non-equidistant grid in frequency for the application of multi-frequency imaging methods (e.g., Chael et al., 2023).
- Supports both in-memory and disk-based (lazily-loaded) image files.
- In-memory data is stored in a self-descriptive data type powered by [EHTDimensionalData.jl](https://github.com/EHTJulia/EHTDimensionalData.jl) (an extension of the powerful [DimensionalData.jl](https://github.com/rafaqz/DimensionalData.jl)).
- Disk-based data is based on NetCDF (on HDF5) accessed by [NCDatasets.jl](https://github.com/Alexander-Barth/NCDatasets.jl), allowing lazy access to data suitable for a large image cube that may not fit into memory and also for containing multiple image data sets inside a single file.
- Includes a FITS writer and loader compatible with the eht-imaging library (Chael et al., 2016, 2018) and SMILI (Akiyama et al., 2017a, b) for the EHT community, as well as with more traditional packages including AIPS, DIFMAP, and CASA software packages.
- Provides interactive plotting tools powered by [PythonPlot.jl](https://github.com/JuliaPy/PythonPlot.jl).
- Offers interactive tools to analyze, edit, and transform images using pure Julia native functions.
## Installation
Assuming that you already have Julia correctly installed, it suffices to import EHTImages.jl in the standard way:
```julia
using Pkg
Pkg.add("EHTImages")
```
EHTImages.jl uses [PythonPlot.jl](https://github.com/stevengj/PythonPlot.jl) for image visualization.
You can use a custom set of perceptually uniform colormaps implemented in Python's [ehtplot](https://github.com/liamedeiros/ehtplot) library, which
has been used in the publications of the EHT Collaboration, by installing it through [CondaPkg.jl](https://github.com/cjdoris/CondaPkg.jl) and
importing it using [PythonCall.jl](https://github.com/cjdoris/PythonCall.jl). For example:
```julia
# Install CondaPkg.jl and PythonCall.jl: (need to run only once in your local/global Julia enviroment)
using Pkg
Pkg.add("CondaPkg")
Pkg.add("PythonCall")
# Install ehtplot (again, needs to run only once in your local/global Julia environment)
using CondaPkg
CondaPkg.add_pip("ehtplot", version="@git+https://github.com/liamedeiros/ehtplot")
```
After installing ehtplot, you can import and utilize it for image visualization in EHTImages.jl. See the documentation.
## Documentation
The documentation is in progress, but documentation for some key data types is already available for the [latest](https://ehtjulia.github.io/EHTImages.jl/dev) version, along with all docstrings of types, methods, and constants. A stable version has not been released yet.
## What if you don't find a feature you want?
We are prioritizing the implementation of features needed for the image analysis conducted in the EHT and ngEHT Collaborations. Nevertheless, your feedback is very helpful in making the package widely useful for the broader community. Please request a feature on the [GitHub issue page](https://github.com/EHTJulia/EHTImages.jl/issues).
## Acknowledgements
The development of this package has been financially supported by the following programs.
- [AST-2107681](https://www.nsf.gov/awardsearch/showAward?AWD_ID=2107681), National Science Foundation, USA: v0.1.5 - present
- [AST-2034306](https://www.nsf.gov/awardsearch/showAward?AWD_ID=2034306), National Science Foundation, USA: v0.1.5 - present
- [OMA-2029670](https://www.nsf.gov/awardsearch/showAward?AWD_ID=2029670), National Science Foundation, USA: v0.1.5 - present
- [AST-1935980](https://www.nsf.gov/awardsearch/showAward?AWD_ID=1935980), National Science Foundation, USA: v0.1.5 - present
- [ALMA North American Development Study Cycle 8](https://science.nrao.edu/facilities/alma/science_sustainability/alma-develop-history), National Radio Astronomy Observatory, USA: v0.1.0 - v0.1.4
The National Radio Astronomy Observatory is a facility of the National Science Foundation operated under cooperative agreement by Associated Universities, Inc.
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | docs | 78 | ```@meta
CurrentModule = EHTImages
```
```@autodocs
Modules = [EHTImages]
``` | EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | docs | 4499 | ```@meta
CurrentModule = EHTImages
```
# EHTImages.jl
This module provides data types and implements basic functions to handle five-dimensional astronomical images for radio interferometry. The module aims to provide the following features, meeting the needs for multi-dimensional high-resolution imaging, particularly for Very Long Baseline Interferometry (e.g., Event Horizon Telescope) and millimeter interferometry (e.g., ALMA) in the regime of narrow field of views.
Currently, the package:
- Provides abstract types and methods to handle both in-memory and disk-based image cubes.
- Offers native support for five-dimensional images (x, y, frequency, polarization, time) in a self-descriptive data format.
- Supports non-equidistant grid in time for the application of dynamic imaging methods (e.g., Johnson et al., 2017, Bouman et al., 2017).
- Supports non-equidistant grid in frequency for the application of multi-frequency imaging methods (e.g., Chael et al., 2023).
- Supports both in-memory and disk-based (lazily-loaded) image files.
- In-memory data is stored in a self-descriptive data type powered by [EHTDimensionalData.jl](https://github.com/EHTJulia/EHTDimensionalData.jl) (an extension of the powerful [DimensionalData.jl](https://github.com/rafaqz/DimensionalData.jl)).
- Disk-based data is based on NetCDF (on HDF5) accessed by [NCDatasets.jl](https://github.com/Alexander-Barth/NCDatasets.jl), allowing lazy access to data suitable for a large image cube that may not fit into memory and also for containing multiple image data sets inside a single file.
- Includes a FITS writer and loader compatible with the eht-imaging library (Chael et al., 2016, 2018) and SMILI (Akiyama et al., 2017a, b) for the EHT community, as well as with more traditional packages including AIPS, DIFMAP, and CASA software packages.
- Provides interactive plotting tools powered by [PythonPlot.jl](https://github.com/JuliaPy/PythonPlot.jl).
- Offers interactive tools to analyze, edit, and transform images using pure Julia native functions.
## Installation
Assuming that you already have Julia correctly installed, it suffices to import EHTImages.jl in the standard way:
```julia
using Pkg
Pkg.add("EHTImages")
```
EHTImages.jl relies on [PythonPlot.jl](https://github.com/stevengj/PythonPlot.jl) for image visualization. You can utilize a custom set of perceptually uniform colormaps implemented in the Python library [ehtplot](https://github.com/liamedeiros/ehtplot), which has been utilized in publications by the EHT Collaboration. To use these colormaps, follow the steps below to install ehtplot via [CondaPkg.jl](https://github.com/cjdoris/CondaPkg.jl) and import it using [PythonCall.jl](https://github.com/cjdoris/PythonCall.jl):
```julia
# Install CondaPkg.jl and PythonCall.jl (only needs to be executed once in your local/global Julia environment)
using Pkg
Pkg.add("CondaPkg")
Pkg.add("PythonCall")
# Install ehtplot (also only needs to be executed once in your local/global Julia environment)
using CondaPkg
CondaPkg.add_pip("ehtplot", version="@git+https://github.com/liamedeiros/ehtplot")
```
## What if you don't find a feature you want?
We are prioritizing the implementation of features needed for the image analysis conducted in the EHT and ngEHT Collaborations. Nevertheless, your feedback is very helpful in making the package widely useful for the broader community. Please request a feature on the [GitHub issue page](https://github.com/EHTJulia/EHTImages.jl/issues).
## Acknowledgements
The development of this package has been financially supported by the following programs.
- [AST-2107681](https://www.nsf.gov/awardsearch/showAward?AWD_ID=2107681), National Science Foundation, USA: v0.1.5 - present
- [AST-2034306](https://www.nsf.gov/awardsearch/showAward?AWD_ID=2034306), National Science Foundation, USA: v0.1.5 - present
- [OMA-2029670](https://www.nsf.gov/awardsearch/showAward?AWD_ID=2029670), National Science Foundation, USA: v0.1.5 - present
- [AST-1935980](https://www.nsf.gov/awardsearch/showAward?AWD_ID=1935980), National Science Foundation, USA: v0.1.5 - present
- [ALMA North American Development Study Cycle 8](https://science.nrao.edu/facilities/alma/science_sustainability/alma-develop-history), National Radio Astronomy Observatory, USA: v0.1.0 - v0.1.4
- The National Radio Astronomy Observatory is a facility of the National Science Foundation operated under cooperative agreement by Associated Universities, Inc. | EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | docs | 2712 | ```@meta
CurrentModule = EHTImages
```
# Intensity Image Data Types
The package provides the high-level abstract data type [`AbstractIntensityImage`](@ref) for five-dimensional images (x (right ascension), y (declination), frequency, polarization, time) and many methods to handle these data sets.
The package further provides two reference implementations of the 5D intensity image cube data sets: in-memory [`IntensityImage`](@ref) and NetCDF4-based [`DiskIntensityImage`](@ref) data types, which share common methods through [`AbstractIntensityImage`](@ref).
Intensity image data types adopt the following conventions:
- An equispaced grid along the `x` and `y` axes, which is left-handed following the standard convention of astronomy images.
- For the `x` and `y` axes, we follow the standard FITS convention — the first index along these axes (i.e., [x,y]=[1,1]) points to the lower-left (eastern- and southern-most) pixel.
- Non-equidistant grid in time for the application of dynamic imaging methods (e.g., Johnson et al., 2017, Bouman et al., 2017).
- Non-equidistant grid in frequency for the application of multi-frequency imaging methods (e.g., Chael et al., 2023).
- The intensity image cube is assumed to be real. This convention practically limits the polarization representation of images to the standard Stokes parameters (I, Q, U, V). The data type does not support other polarization representations such as (RR, LL, RL, LR) or (XX, YY, XY, YX), which are both image-domain counterparts of raw interferometric data and are generally described as complex functions.
## Abstract Intensity Images
The high-level abstract type of the 5D intensity image cube is defined by [`AbstractIntensityImage`](@ref). Here, [`AbstractImageDataSet`](@ref) is a common high-level abstract type for data sets handled in this package (i.e. not limited to the intensity cube). While [`AbstractIntensityImage`](@ref) is not a subtype of `AbstractArray`, it behaves like `AbstractArray` thanks to methods associated with [`AbstractImageDataSet`](@ref).
To handle the 5D intensity cube, [`AbstractIntensityImage`](@ref) assumes the following fields (a short example follows the list):
- `data`: 5-dimensional array for intensity [x, y, polarization, frequency, time]
- `p`: 1 dimensional array for polarization codes in string (coordinate for polarization axis)
- `f`: 1 dimensional array for frequency in Hz (coordinate for frequency axis)
- `t`: 1 dimensional array for time in modified Julian dates (coordinate for time axis)
- `metadata`: `OrderedCollections.OrderedDict`-like object to store metadata
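As a minimal sketch of this field contract, the snippet below creates a blank in-memory [`IntensityImage`](@ref) with the [`intensityimage`](@ref) function (introduced later in this documentation) and inspects the five fields; the pixel number and pixel size are arbitrary illustration values:

```julia
using EHTImages
using EHTUtils # preloads angular units such as μas

im = intensityimage(64, 1.0, μas) # 64 x 64 pixels, 1 μas pixel size
size(im.data) # (64, 64, 1, 1, 1): x, y, polarization, frequency, time
im.p # ["I"]: polarization codes
im.f # [1.0]: frequencies in Hz
im.t # [0.0]: times in MJD
im.metadata # OrderedDict-like metadata
```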
Let's walk through methods defined in [`AbstractIntensityImage`](@ref) using its in-memory subtype [`IntensityImage`](@ref) in the next section.
| EHTImages | https://github.com/EHTJulia/EHTImages.jl.git |
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | docs | 7481 | # Disk Intensity Images
[`DiskIntensityImage`](@ref) provides a disk-based implementation of the 5D intensity image.
Image data are stored in a NetCDF4 container and loaded lazily. NetCDF4 is a widely used ND-labeled data format that uses HDF5 at the lower level, and it is therefore accessible through both NetCDF4 and HDF5 interfaces available for almost all programming languages used in scientific computing.
Internally, it uses a self-descriptive data set based on the `NCDataset` type of [NCDatasets.jl](https://github.com/Alexander-Barth/NCDatasets.jl). Thanks to the feature-rich [NCDatasets.jl](https://github.com/Alexander-Barth/NCDatasets.jl), the package allows storing the image in an arbitrary HDF5 group --- this means users can store multiple data sets, not limited to images, inside a single file.
## Converting an Image FITS file into NetCDF4 format
Here, we will use a FITS image created by Python's eht-imaging library, which was used in [Chael et al. 2023](https://ui.adsabs.harvard.edu/abs/2023ApJ...945...40C/abstract). Let's download the data and load it into an `IntensityImage` instance.
```@example 1
using Downloads: download
using EHTImages
# Download a FITS image
fitsname = download("https://github.com/achael/multifrequency_scripts/raw/main/sec_4.2/images_M87_Chael/M87_230GHz_Chael.fits")
# Load the FITS image into an IntensityImage instance
image = load_fits(fitsname)
```
You can save the data into a NetCDF4 file with the [`save_netcdf!`](@ref) method.
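As a minimal sketch, the call below is assumed to write the loaded image into a NetCDF4 container; the output file name is a placeholder, and the exact keyword arguments (e.g., for choosing an HDF5 group) should be checked against the [`save_netcdf!`](@ref) docstring:

```julia
save_netcdf!(image, "M87_230GHz_Chael.nc")
```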
All of the loaded intensity images, including their metadata, are loaded into the field `image.dimstack::EHTDimensionalData.DimStack`.
```julia
image.dimstack # will give an access to the dimstack instance storing all image data.
```
[`IntensityImage`](@ref) is *immutable*, so users cannot change the `DimStack` object associated with an [`IntensityImage`](@ref) instance to something else. However, arrays and metadata stored in the `DimStack` object are *mutable*. This allows users to flexibly edit data inside.
## Accessing and editing data
You can access the raw array of the intensity image, with dimensions (x, y, polarization, frequency, time), by
```@example 1
image.data # return an array of intensity in the unit of Jy/pixel
```
You can access the labels or coordinates of each axis by
```@example 1
image.p # return an array of polarization labels in string
```
```@example 1
image.t # return an array of time in MJD
```
```@example 1
image.f # return an array of frequencies in Hz
```
Now you can see that this particular image is a single-frequency, single-stokes, and single-epoch image with 512 x 512 pixels. For the spatial extent, there is a dedicated method [`get_xygrid(::AbstractIntensityImage)`](@ref). By default, it returns a series of central coordinates in radians.
```@example 1
xygrid = get_xygrid(image)
```
or you can specify a unit in multiple ways.
```@example 1
# use string
xygrid = get_xygrid(image, "μas")
# use Unitful
using Unitful
using UnitfulAngles
xygrid = get_xygrid(image, u"μas")
# use preload units in EHTUtils
using EHTUtils # preload units
xygrid = get_xygrid(image, μas)
```
Now you see that this image has a field of view of about 1022 μas along each axis. If you need a plain vector for an axis, you can simply use `collect(xygrid[1])`.
Metadata are stored in an `OrderedDict`. You can access the metadata through the `metadata` field.
```@example 1
image.metadata # access to metadata
```
As noted earlier, arrays and metadata stored in [`IntensityImage`](@ref) instances are *mutable*. This allows users to flexibly edit data inside.
```@example 1
image.metadata[:observer] = "Wonderful Astronomer" # edit metadata
image.metadata
```
## Plotting Images
### Intensity map
The package currently relies on [PythonPlot.jl](https://github.com/stevengj/PythonPlot.jl) for image visualization. It has a customized [`imshow`](@ref) method for [`AbstractIntensityImage`](@ref) type.
```julia
using EHTUtils # for shortcuts to flux and angular units
using PythonPlot
f = figure()
ax = gca()
imshow(image, fluxunit=1e9K, angunit=μas, scale=:linear, cmap="viridis")
```
```
Dict{Any, Any} with 4 entries:
"ylabelobj" => <py Text(0, 0.5, 'Relative Dec (μas)')>
"xlabelobj" => <py Text(0.5, 0, 'Relative RA (μas)')>
"colorbarobj" => <py matplotlib.colorbar.Colorbar object at 0x1803c3090>
"imshowobj" => <py matplotlib.image.AxesImage object at 0x180c20f10>
```

As you can see [`imshow`](@ref) will return all python objects in the generated plot so that users can further customize each component.
You can utilize a custom set of perceptually uniform colormaps implemented in the Python library [ehtplot](https://github.com/liamedeiros/ehtplot), which has been utilized in publications by the EHT Collaboration. After installing ehtplot via [CondaPkg.jl](https://github.com/cjdoris/CondaPkg.jl) (see the Installation section of the documentation), you can import and utilize it for image visualization using [PythonCall.jl](https://github.com/cjdoris/PythonCall.jl). For example:
```julia
using PythonCall # provides the `pyimport` function
ehtplot = pyimport("ehtplot")
f = figure()
ax = gca()
# use "afmhot_us" colormap in ehtplot, a standard colormap used in the EHT Collaboration
imshow(image, fluxunit=1e9K, angunit=μas, scale=:linear, cmap="afmhot_us")
xlim(200, -200)
ylim(-200, 200)
```

You can also change the scale. The `imshow` method has three options (`:linear`, `:log`, and `:gamma`). The dynamic range of the `:log` scale can be controlled by `dyrange`.
```julia
f = figure()
ax = gca()
imshow(image, fluxunit=1e9K, angunit=μas, scale=:log, dyrange=1000, cmap="gnuplot2_us")
```

For the `:gamma` scale, the power law can be controlled by `gamma`:
```julia
f = figure()
ax = gca()
imshow(image, fluxunit=1e9K, angunit=μas, scale=:gamma, gamma=0.5, cmap="cubehelix_u")
```

### Toolkit for a custom plot of images
Sometimes, users might want to create a custom function to plot images. There are some useful sets of methods to assist with this. Additionally, the source code for the [`imshow`](@ref) method would be helpful for learning how to use PythonPlot for custom plotting.
- [`get_imextent`](@ref) method: This method will return the extent of the image in the specified angular unit for the `PythonPlot.imshow`'s `extent` argument. Users can plot images with the actual angular scales using `PythonPlot.imshow(array, origin="lower", extent=imextent)`.
- [`get_bconv`](@ref) method: This method derives a conversion factor from Jy/Pixel (the unit for the `data` field) to an arbitrary unit of intensity.
## Saving into a FITS file
You can save a 3D cube of a sliced image using the [`save_fits!`](@ref)`(::AbstractIntensityImage, filename::AbstractString, idx=(1,1))` method. The `idx` parameter here represents the (time, frequency) indices, as popular image FITS formats do not support non-equidistant grids for time and frequency. The exported FITS file is compatible with CASA, AIPS, DIFMAP, and other EHT imaging packages (eht-imaging and SMILI).
```julia
save_fits!(image, "./foobar.fits")
```
## Create a blank new image
You can create a blank 5D image directly with the [`intensityimage`](@ref) function.
```@example
im = intensityimage(200, 1.0, μas)
```
You can also specify time, frequency, the number of polarizations, and all other metadata. Please see the docstring of [`intensityimage`](@ref).
|
[
"MIT"
] | 0.2.4 | c033fd69366d1234a50afdeca8a4f654dea40afd | docs | 7508 | # In-memory Intensity Images
[`IntensityImage`](@ref) provides an in-memory implementation of the 5D intensity image. Internally, it uses a self-descriptive data set based on the `DimStack` type of [EHTDimensionalData.jl](https://github.com/EHTJulia/EHTDimensionalData.jl), which is an extension of the powerful [DimensionalData.jl](https://github.com/rafaqz/DimensionalData.jl). Unless you are dealing with images so large that they will not fit within the memory of your computer, this is likely the data type you will work with in this package.
## Loading an Image FITS file
Here, we will use a FITS image created by Python's eht-imaging library, which was used in [Chael et al. 2023](https://ui.adsabs.harvard.edu/abs/2023ApJ...945...40C/abstract). Let's download the data first.
```@example 1
using Downloads: download
# Download a FITS image
fitsname = download("https://github.com/achael/multifrequency_scripts/raw/main/sec_4.2/images_M87_Chael/M87_230GHz_Chael.fits")
```
A FITS file can be loaded into an [`IntensityImage`](@ref) instance using the [`load_fits`](@ref) method. This method utilizes [FITSIO.jl](https://github.com/JuliaAstro/FITSIO.jl), and you can also directly load data from an instance of the `FITS` data type in [FITSIO.jl](https://github.com/JuliaAstro/FITSIO.jl).
```@example 1
using EHTImages
# Load the FITS image into an IntensityImage instance
image = load_fits(fitsname)
```
All of the loaded intensity images, including their metadata, are loaded into the field `image.dimstack::EHTDimensionalData.DimStack`.
```julia
image.dimstack # will give an access to the dimstack instance storing all image data.
```
[`IntensityImage`](@ref) is *immutable*, so users cannot change the `DimStack` object associated with an [`IntensityImage`](@ref) instance to something else. However, arrays and metadata stored in the `DimStack` object are *mutable*. This allows users to flexibly edit data inside.
## Accessing and editing data
You can access the raw array of the intensity image, with dimensions (x, y, polarization, frequency, time), by
```@example 1
image.data # return an array of intensity in the unit of Jy/pixel
```
You can access the labels or coordinates of each axis by
```@example 1
image.p # return an array of polarization labels in string
```
```@example 1
image.t # return an array of time in MJD
```
```@example 1
image.f # return an array of frequencies in Hz
```
Now you can see that this particular image is a single-frequency, single-stokes, and single-epoch image with 512 x 512 pixels. For the spatial extent, there is a dedicated method [`get_xygrid(::AbstractIntensityImage)`](@ref). By default, it returns a series of central coordinates in radians.
```@example 1
xygrid = get_xygrid(image)
```
or you can specify a unit in multiple ways.
```@example 1
# use string
xygrid = get_xygrid(image, "μas")
# use Unitful
using Unitful
using UnitfulAngles
xygrid = get_xygrid(image, u"μas")
# use preload units in EHTUtils
using EHTUtils # preload units
xygrid = get_xygrid(image, μas)
```
Now you see that this image has a field of view of about 1022 μas along each axis. If you need a plain vector for an axis, you can simply use `collect(xygrid[1])`.
Metadata are stored in an `OrderedDict`. You can access the metadata through the `metadata` field.
```@example 1
image.metadata # access to metadata
```
As noted earlier, arrays and metadata stored in [`IntensityImage`](@ref) instances are *mutable*. This allows users to flexibly edit data inside.
```@example 1
image.metadata[:observer] = "Wonderful Astronomer" # edit metadata
image.metadata
```
## Plotting Images
### Intensity map
The package currently relies on [PythonPlot.jl](https://github.com/stevengj/PythonPlot.jl) for image visualization. It has a customized [`imshow`](@ref) method for [`AbstractIntensityImage`](@ref) type.
```julia
using EHTUtils # for shortcuts to flux and angular units
using PythonPlot
f = figure()
ax = gca()
imshow(image, fluxunit=1e9K, angunit=μas, scale=:linear, cmap="viridis")
```
```
Dict{Any, Any} with 4 entries:
"ylabelobj" => <py Text(0, 0.5, 'Relative Dec (μas)')>
"xlabelobj" => <py Text(0.5, 0, 'Relative RA (μas)')>
"colorbarobj" => <py matplotlib.colorbar.Colorbar object at 0x1803c3090>
"imshowobj" => <py matplotlib.image.AxesImage object at 0x180c20f10>
```

As you can see [`imshow`](@ref) will return all python objects in the generated plot so that users can further customize each component.
You can utilize a custom set of perceptually uniform colormaps implemented in the Python library [ehtplot](https://github.com/liamedeiros/ehtplot), which has been utilized in publications by the EHT Collaboration. After installing ehtplot via [CondaPkg.jl](https://github.com/cjdoris/CondaPkg.jl) (see the Installation section of the documentation), you can import and utilize it for image visualization using [PythonCall.jl](https://github.com/cjdoris/PythonCall.jl). For example:
```julia
using PythonCall # provides the `pyimport` function
ehtplot = pyimport("ehtplot")
f = figure()
ax = gca()
# use "afmhot_us" colormap in ehtplot, a standard colormap used in the EHT Collaboration
imshow(image, fluxunit=1e9K, angunit=μas, scale=:linear, cmap="afmhot_us")
xlim(200, -200)
ylim(-200, 200)
```

You can also change the scale. The `imshow` method has three options (`:linear`, `:log`, and `:gamma`). The dynamic range of the `:log` scale can be controlled by `dyrange`.
```julia
f = figure()
ax = gca()
imshow(image, fluxunit=1e9K, angunit=μas, scale=:log, dyrange=1000, cmap="gnuplot2_us")
```

For the `:gamma` scale, the power law can be controlled by `gamma`:
```julia
f = figure()
ax = gca()
imshow(image, fluxunit=1e9K, angunit=μas, scale=:gamma, gamma=0.5, cmap="cubehelix_u")
```

### Toolkit for a custom plot of images
Sometimes, users might want to create a custom function to plot images. There are some useful methods to assist with this, sketched after the list below. Additionally, the source code of the [`imshow`](@ref) method is helpful for learning how to use PythonPlot for custom plotting.
- [`get_imextent`](@ref) method: This method will return the extent of the image in the specified angular unit for the `PythonPlot.imshow`'s `extent` argument. Users can plot images with the actual angular scales using `PythonPlot.imshow(array, origin="lower", extent=imextent)`.
- [`get_bconv`](@ref) method: This method derives a conversion factor from Jy/Pixel (the unit for the `data` field) to an arbitrary unit of intensity.
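The snippet below is a minimal sketch combining the two helpers with `PythonPlot.imshow`. The exact signatures of `get_imextent` and `get_bconv` (argument order and keyword names) are assumptions here and should be checked against their docstrings; the transpose reflects the (x, y) storage order of `data` and may need adjusting:

```julia
using EHTUtils
using PythonPlot

imextent = get_imextent(image, μas)     # image extent in μas (assumed signature)
bconv = get_bconv(image, fluxunit=1e9K) # Jy/pixel -> 10^9 K (assumed signature)
PythonPlot.imshow(bconv .* image.data[:, :, 1, 1, 1]', origin="lower", extent=imextent)
```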
## Saving into a FITS file
You can save a 3D cube of a sliced image using the [`save_fits!`](@ref)`(::AbstractIntensityImage, filename::AbstractString, idx=(1,1))` method. The `idx` parameter here represents the (time, frequency) indices, as popular image FITS formats do not support non-equidistant grids for time and frequency. The exported FITS file is compatible with CASA, AIPS, DIFMAP, and other EHT imaging packages (eht-imaging and SMILI).
```julia
save_fits!(image, "./foobar.fits")
```
## Create a blank new image
You can create a blank 5D image directly with the [`intensityimage`](@ref) function.
```@example
im = intensityimage(200, 1.0, μas)
```
You can also specify time, frequency, the number of polarizations, and all other metadata. Please see the docstring of [`intensityimage`](@ref).
|
[
"Apache-2.0"
] | 0.2.3 | 8a261ee237dbd5fae2e10192cfd8b1d465c6d7eb | code | 136 | module LatticeSites
using StaticArrays, Random
include("spins.jl")
include("space.jl")
include("flip.jl")
include("lattices.jl")
end
| LatticeSites | https://github.com/Roger-luo/LatticeSites.jl.git |
|
[
"Apache-2.0"
] | 0.2.3 | 8a261ee237dbd5fae2e10192cfd8b1d465c6d7eb | code | 1228 | export ups, downs
ups(::Type{ST}, dims::Int...) where ST = ups(ST, dims)
downs(::Type{ST}, dims::Int...) where ST = downs(ST, dims)
ups(::Type{ST}, dims::Dims) where ST = fill(up(ST), dims)
downs(::Type{ST}, dims::Dims) where ST = fill(down(ST), dims)
Random.rand(::Type{Bit{T}}, dims::Dims) where T = Bit{T}.(rand(Bool, dims))
Random.rand(::Type{Spin{T}}, dims::Dims) where T = Spin{T}.(2 * rand(Bool, dims) .- 1)
Random.rand(::Type{Half{T}}, dims::Dims) where T = Half{T}.(rand(Bool, dims) .- 0.5)
# static array
import Base: @_inline_meta
ups(::Type{SA}) where {SA <: StaticArray} = _ups(Size(SA), SA)
@generated function _ups(::Size{s}, ::Type{SA}) where {s, SA <: StaticArray}
T = eltype(SA)
if T == Any
T = Bit{Float64} # default
end
v = [:(up($T)) for i = 1:prod(s)]
return quote
@_inline_meta
$SA(tuple($(v...)))
end
end
downs(::Type{SA}) where {SA <: StaticArray} = _downs(Size(SA), SA)
@generated function _downs(::Size{s}, ::Type{SA}) where {s, SA <: StaticArray}
T = eltype(SA)
if T == Any
T = Bit{Float64} # default
end
v = [:(down($T)) for i = 1:prod(s)]
return quote
@_inline_meta
$SA(tuple($(v...)))
end
end
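# Usage sketch (illustration only; dimensions are arbitrary):
#
#   ups(Spin{Int}, 4, 4)        # 4x4 all-up spin configuration
#   downs(Bit{Float64}, (2, 2)) # 2x2 all-down bit configuration
#   rand(Half{Float64}, (3, 3)) # 3x3 random ±1/2 configuration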
| LatticeSites | https://github.com/Roger-luo/LatticeSites.jl.git |
|
[
"Apache-2.0"
] | 0.2.3 | 8a261ee237dbd5fae2e10192cfd8b1d465c6d7eb | code | 915 | struct FixedLengthBits{L, N}
chunks::NTuple{N, UInt64}
end
FixedLengthBits(x::Int) = FixedLengthBits{64 - leading_zeros(x), 1}((UInt(x), ))
const _msk64 = ~UInt64(0)
@inline _div64(l) = l >> 6
@inline _mod64(l) = l & 63
@inline _msk_end(l::Integer) = _msk64 >>> _mod64(-l)
@inline _msk_end(B::FixedLengthBits{L}) where L = _msk_end(L)
num_bit_chunks(n::Int) = _div64(n+63)
function Base.getindex(x::FixedLengthBits{L}, i::Int) where L
@boundscheck 0 < i ≤ L ? true : throw(BoundsError(x, i))
Int((x.chunks[_div64(i - 1) + 1] >> _mod64(i - 1)) & 0x01)
end
function Base.iterate(x::FixedLengthBits{L}, state=1) where L
state == L + 1 && return nothing
@inbounds(x[state]), state + 1
end
Base.length(::FixedLengthBits{L}) where L = L
Base.eltype(::FixedLengthBits) = Int
function Base.show(io::IO, x::FixedLengthBits{L}) where L
@inbounds for i = L:-1:1
print(io, x[i])
end
end
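# Usage sketch (illustration only):
#
#   b = FixedLengthBits(5) # 0b101 -> FixedLengthBits{3, 1}
#   b[1], b[2], b[3]       # (1, 0, 1); bit 1 is the least significant
#   collect(b)             # [1, 0, 1]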
| LatticeSites | https://github.com/Roger-luo/LatticeSites.jl.git |
|
[
"Apache-2.0"
] | 0.2.3 | 8a261ee237dbd5fae2e10192cfd8b1d465c6d7eb | code | 2358 | export fill_bit!
function Base.convert(::Type{ST}, x::T2) where {T1 <: Number, T2 <: Number, ST <: BinarySite{T1}}
round(ST, T1(x))
end
for ST1 in [:Bit, :Spin, :Half]
@eval Base.convert(::Type{$ST1{T}}, x::$ST1) where T = $ST1{T}(T(x.value))
@eval Base.convert(::Type{T1}, x::$ST1{T2}) where {T1, T2} = T1(x.value)
@eval Base.getindex(::Type{$ST1}, args::T...) where T = $ST1{T}[args...]
end
Base.convert(::Type{ST1}, x::ST2) where {ST1 <: BinarySite, ST2 <: BinarySite} =
x == up(ST2) ? up(ST1) : down(ST1)
Base.convert(::Type{SA}, x::Int) where {S, T, N, SA <: StaticArray{S, Bit{T}, N}} =
_convert(Size(SA), eltype(SA), SA, x)
get_bit(::Type{T}, x::Int, i::Int) where T = T((x >> (i - 1)) & 1)
function fill_bit!(A::Array{T}, n::Int) where T
@inbounds for i in 1:length(A)
A[i] = get_bit(T, n, i)
end
A
end
@generated function _convert(::Size{s}, ::Type{Bit{T}}, ::Type{SA}, x::Int) where {s, T, SA}
v = [:(get_bit($T, x, $i)) for i = 1:prod(s)]
return quote
@_inline_meta
$SA(tuple($(v...)))
end
end
# Arrays
for IntType in (:Int8, :Int16, :Int32, :Int64, :Int128, :BigInt)
@eval Base.$IntType(x::Bit) = convert($IntType, x)
@eval Base.convert(::Type{$IntType}, x::Bit{T}) where T = convert($IntType, x.value)
@eval begin
Base.$IntType(x::AbstractArray{Bit{T}, N}) where {T, N} = convert($IntType, x)
function Base.convert(::Type{$IntType}, x::AbstractArray{Bit{T}, N}) where {T, N}
if sizeof($IntType) * 8 < length(x)
throw(InexactError(:convert, $IntType, x))
end
sum(convert($IntType, each) << (i-1) for (i, each) in enumerate(x))
end
function Base.convert(::Type{$IntType}, x::AbstractArray{Spin{T}, N}) where {T, N}
if sizeof($IntType) * 8 < length(x)
throw(InexactError(:convert, $IntType, x))
end
$IntType(sum($IntType(div(each.value+1, 2)) << (i-1) for (i, each) in enumerate(x)))
end
function Base.convert(::Type{$IntType}, x::AbstractArray{Half{T}, N}) where {T, N}
if sizeof($IntType) * 8 < length(x)
throw(InexactError(:convert, $IntType, x))
end
$IntType(sum($IntType(each.value+0.5) << (i-1) for (i, each) in enumerate(x)))
end
end
end
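# Usage sketch (illustration only):
#
#   bits = fill_bit!(Vector{Bit{Int}}(undef, 3), 5) # bits of 5, LSB first
#   Int(bits)                                       # -> 5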
| LatticeSites | https://github.com/Roger-luo/LatticeSites.jl.git |
|
[
"Apache-2.0"
] | 0.2.3 | 8a261ee237dbd5fae2e10192cfd8b1d465c6d7eb | code | 2140 | export flip, flip!, randflip!
"""
FlipStyle
Abstract type for flip styles.
"""
abstract type FlipStyle end
"""
flip(site)
Defines how to flip this type of `site`.
"""
function flip end
flip(s::BT) where {BT <: BinarySite} = s == up(BT) ? down(BT) : up(BT)
"""
flip!(S, I...)
Flips the given configuration `S` at index `I...`.
"""
function flip! end
flip!(S::AbstractArray{BT, N}, I...) where {BT <: BinarySite, N} = flip!(S, I)
flip!(S::AbstractArray{BT, N}, I::Tuple) where {BT <: BinarySite, N} = (@inbounds S[I...] = flip(S[I...]); S)
"""
randflip!(config) -> (proposed_index, config)
randflip!(::FlipStyle, config) -> (proposed_index, config)
Flip the lattice configuration randomly using the given flip style. The default flip style is
[`UniformFlip`](@ref)`{1}`, which chooses **one** site in the configuration uniformly and flips it.
One should always be able to use `config[proposed_index]` to get the current value of
this lattice configuration. Whether one can change the site, depends on whether the
configuration is stored in a mutable type.
"""
function randflip! end
randflip!(S::AbstractArray) = randflip!(FlipStyle(S), S)
"""
UniformFlip{N}
Choose `N` sites in the configuration uniformly and flip them.
"""
struct UniformFlip{N} <: FlipStyle end
UniformFlip(N::Int) = UniformFlip{N}()
FlipStyle(S::AbstractArray{BT, N}) where {BT <: BinarySite, N} = UniformFlip(1) # use uniform(1) as default
function randflip!(::UniformFlip{1}, S::AbstractArray{BT}) where {BT <: BinarySite}
proposed_index = rand(1:length(S))
flip!(S, proposed_index)
proposed_index, S
end
# TODO:
# 1. minimum sampling length of index can be calculated
# use this to generate a shorter sequence
# 2. specialized method for static arrays (their length can be found in compile time)
function randflip!(::UniformFlip{N}, S::AbstractArray{BT}) where {BT <: BinarySite, N}
proposed = zeros(Int, N)
count = 0
while count != N
i = rand(1:length(S))
i in proposed || (count += 1; proposed[count] = i)
end
@inbounds for i in 1:N
flip!(S, proposed[i])
end
proposed, S
end
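# Usage sketch (illustration only):
#
#   S = rand(Spin{Int}, (4, 4))            # random spin configuration
#   idx, S = randflip!(S)                  # flip one uniformly chosen site
#   idxs, S = randflip!(UniformFlip(2), S) # flip two distinct sites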
| LatticeSites | https://github.com/Roger-luo/LatticeSites.jl.git |