# GPUCompiler.jl v0.27.8 (MIT), https://github.com/JuliaGPU/GPUCompiler.jl.git
# ------------------------------------------------------------------------------
# LLVM IR optimization
function optimize!(@nospecialize(job::CompilerJob), mod::LLVM.Module; opt_level=1)
tm = llvm_machine(job.config.target)
global current_job
current_job = job
@dispose pb=NewPMPassBuilder() begin
register!(pb, CPUFeaturesPass())
register!(pb, LowerPTLSPass())
register!(pb, LowerGCFramePass())
register!(pb, AddKernelStatePass())
register!(pb, LowerKernelStatePass())
register!(pb, CleanupKernelStatePass())
add!(pb, NewPMModulePassManager()) do mpm
buildNewPMPipeline!(mpm, job, opt_level)
end
run!(pb, mod, tm)
end
optimize_module!(job, mod)
run!(DeadArgumentEliminationPass(), mod, tm)
return
end
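# a minimal usage sketch (assuming a `job::CompilerJob` and a freshly generated
# `mod::LLVM.Module`, e.g. obtained from `codegen(:llvm, job)`):
#
#   JuliaContext() do ctx
#       optimize!(job, mod; opt_level=2)
#   end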
function buildNewPMPipeline!(mpm, @nospecialize(job::CompilerJob), opt_level)
buildEarlySimplificationPipeline(mpm, job, opt_level)
add!(mpm, AlwaysInlinerPass())
buildEarlyOptimizerPipeline(mpm, job, opt_level)
add!(mpm, NewPMFunctionPassManager()) do fpm
buildLoopOptimizerPipeline(fpm, job, opt_level)
buildScalarOptimizerPipeline(fpm, job, opt_level)
if uses_julia_runtime(job) && opt_level >= 2
# XXX: we disable vectorization, as this generally isn't useful for GPU targets
# and actually causes issues with some back-end compilers (like Metal).
# TODO: make this independent of `uses_julia_runtime` (which effectively means CPU); give it its own control flag
buildVectorPipeline(fpm, job, opt_level)
end
if isdebug(:optim)
add!(fpm, WarnMissedTransformationsPass())
end
end
buildIntrinsicLoweringPipeline(mpm, job, opt_level)
buildCleanupPipeline(mpm, job, opt_level)
end
const BasicSimplifyCFGOptions =
(; convert_switch_range_to_icmp=true,
convert_switch_to_lookup_table=true,
forward_switch_cond_to_phi=true,
)
const AggressiveSimplifyCFGOptions =
(; convert_switch_range_to_icmp=true,
convert_switch_to_lookup_table=true,
forward_switch_cond_to_phi=true,
# These mess with loop rotation, so only do them after that
hoist_common_insts=true,
# Causes an SRET assertion error in late-gc-lowering
#sink_common_insts=true
)
function buildEarlySimplificationPipeline(mpm, @nospecialize(job::CompilerJob), opt_level)
if should_verify()
add!(mpm, NewPMFunctionPassManager()) do fpm
add!(fpm, GCInvariantVerifierPass())
end
add!(mpm, VerifierPass())
end
add!(mpm, ForceFunctionAttrsPass())
# TODO invokePipelineStartCallbacks
add!(mpm, Annotation2MetadataPass())
add!(mpm, ConstantMergePass())
add!(mpm, NewPMFunctionPassManager()) do fpm
add!(fpm, LowerExpectIntrinsicPass())
if opt_level >= 2
add!(fpm, PropagateJuliaAddrspacesPass())
end
add!(fpm, SimplifyCFGPass(; BasicSimplifyCFGOptions...))
if opt_level >= 1
add!(fpm, DCEPass())
add!(fpm, SROAPass())
end
end
# TODO invokeEarlySimplificationCallbacks
end
function buildEarlyOptimizerPipeline(mpm, @nospecialize(job::CompilerJob), opt_level)
add!(mpm, NewPMCGSCCPassManager()) do cgpm
# TODO invokeCGSCCCallbacks
add!(cgpm, NewPMFunctionPassManager()) do fpm
add!(fpm, AllocOptPass())
add!(fpm, Float2IntPass())
add!(fpm, LowerConstantIntrinsicsPass())
end
end
add!(mpm, CPUFeaturesPass())
if opt_level >= 1
add!(mpm, NewPMFunctionPassManager()) do fpm
if opt_level >= 2
add!(fpm, SROAPass())
add!(fpm, InstCombinePass())
add!(fpm, JumpThreadingPass())
add!(fpm, CorrelatedValuePropagationPass())
add!(fpm, ReassociatePass())
add!(fpm, EarlyCSEPass())
add!(fpm, AllocOptPass())
else
add!(fpm, InstCombinePass())
add!(fpm, EarlyCSEPass())
end
end
# TODO invokePeepholeCallbacks
end
end
function buildLoopOptimizerPipeline(fpm, @nospecialize(job::CompilerJob), opt_level)
add!(fpm, NewPMLoopPassManager()) do lpm
add!(lpm, LowerSIMDLoopPass())
if opt_level >= 2
add!(lpm, LoopRotatePass())
end
# TODO invokeLateLoopOptimizationCallbacks
end
if opt_level >= 2
add!(fpm, NewPMLoopPassManager(; use_memory_ssa=true)) do lpm
add!(lpm, LICMPass())
add!(lpm, JuliaLICMPass())
add!(lpm, SimpleLoopUnswitchPass(nontrivial=true, trivial=true))
add!(lpm, LICMPass())
add!(lpm, JuliaLICMPass())
end
end
if opt_level >= 2
add!(fpm, IRCEPass())
end
add!(fpm, NewPMLoopPassManager()) do lpm
if opt_level >= 2
add!(lpm, LoopInstSimplifyPass())
add!(lpm, LoopIdiomRecognizePass())
add!(lpm, IndVarSimplifyPass())
add!(lpm, LoopDeletionPass())
add!(lpm, LoopFullUnrollPass())
end
# TODO invokeLoopOptimizerEndCallbacks
end
end
function buildScalarOptimizerPipeline(fpm, @nospecialize(job::CompilerJob), opt_level)
if opt_level >= 2
add!(fpm, AllocOptPass())
add!(fpm, SROAPass())
add!(fpm, InstSimplifyPass())
add!(fpm, GVNPass())
add!(fpm, MemCpyOptPass())
add!(fpm, SCCPPass())
add!(fpm, CorrelatedValuePropagationPass())
add!(fpm, DCEPass())
add!(fpm, IRCEPass())
add!(fpm, InstCombinePass())
add!(fpm, JumpThreadingPass())
end
if opt_level >= 3
add!(fpm, GVNPass())
end
if opt_level >= 2
add!(fpm, DSEPass())
# TODO invokePeepholeCallbacks
add!(fpm, SimplifyCFGPass(; AggressiveSimplifyCFGOptions...))
add!(fpm, AllocOptPass())
add!(fpm, NewPMLoopPassManager()) do lpm
add!(lpm, LoopDeletionPass())
add!(lpm, LoopInstSimplifyPass())
end
add!(fpm, LoopDistributePass())
end
# TODO invokeScalarOptimizerCallbacks
end
function buildVectorPipeline(fpm, @nospecialize(job::CompilerJob), opt_level)
add!(fpm, InjectTLIMappings())
add!(fpm, LoopVectorizePass())
add!(fpm, LoopLoadEliminationPass())
add!(fpm, InstCombinePass())
add!(fpm, SimplifyCFGPass(; AggressiveSimplifyCFGOptions...))
add!(fpm, SLPVectorizerPass())
add!(fpm, VectorCombinePass())
# TODO invokeVectorizerCallbacks
add!(fpm, ADCEPass())
add!(fpm, LoopUnrollPass(; opt_level))
end
function buildIntrinsicLoweringPipeline(mpm, @nospecialize(job::CompilerJob), opt_level)
add!(mpm, RemoveNIPass())
# lower GC intrinsics
if !uses_julia_runtime(job)
add!(mpm, NewPMFunctionPassManager()) do fpm
add!(fpm, LowerGCFramePass())
end
end
# lower kernel state intrinsics
# NOTE: we can only do so here, as GC lowering can introduce calls to the runtime,
# and thus additional uses of the kernel state intrinsics.
if job.config.kernel
# TODO: now that all kernel state-related passes are being run here, merge some?
add!(mpm, AddKernelStatePass())
add!(mpm, NewPMFunctionPassManager()) do fpm
add!(fpm, LowerKernelStatePass())
end
add!(mpm, CleanupKernelStatePass())
end
if !uses_julia_runtime(job)
# remove dead uses of ptls
add!(mpm, NewPMFunctionPassManager()) do fpm
add!(fpm, ADCEPass())
end
add!(mpm, LowerPTLSPass())
end
add!(mpm, NewPMFunctionPassManager()) do fpm
# lower exception handling
if uses_julia_runtime(job)
add!(fpm, LowerExcHandlersPass())
end
add!(fpm, GCInvariantVerifierPass())
add!(fpm, LateLowerGCPass())
if uses_julia_runtime(job) && VERSION >= v"1.11.0-DEV.208"
add!(fpm, FinalLowerGCPass())
end
end
if uses_julia_runtime(job) && VERSION < v"1.11.0-DEV.208"
add!(mpm, FinalLowerGCPass())
end
if opt_level >= 2
add!(mpm, NewPMFunctionPassManager()) do fpm
add!(fpm, GVNPass())
add!(fpm, SCCPPass())
add!(fpm, DCEPass())
end
end
# lower PTLS intrinsics
if uses_julia_runtime(job)
add!(mpm, LowerPTLSPass())
end
if opt_level >= 1
add!(mpm, NewPMFunctionPassManager()) do fpm
add!(fpm, InstCombinePass())
add!(fpm, SimplifyCFGPass(; AggressiveSimplifyCFGOptions...))
end
end
# remove Julia address spaces
add!(mpm, RemoveJuliaAddrspacesPass())
# Julia's operand bundles confuse the inliner, so repeat inlining here now that they are gone.
# FIXME: we should fix the inliner so that inlined code gets optimized early on
add!(mpm, AlwaysInlinerPass())
end
function buildCleanupPipeline(mpm, @nospecialize(job::CompilerJob), opt_level)
if opt_level >= 2
add!(mpm, NewPMFunctionPassManager()) do fpm
add!(fpm, CombineMulAddPass())
add!(fpm, DivRemPairsPass())
end
end
# TODO invokeOptimizerLastCallbacks
add!(mpm, NewPMFunctionPassManager()) do fpm
add!(fpm, AnnotationRemarksPass())
end
add!(mpm, NewPMFunctionPassManager()) do fpm
add!(fpm, DemoteFloat16Pass())
if opt_level >= 1
add!(fpm, GVNPass())
end
end
end
## custom passes
# lowering intrinsics
function cpu_features!(mod::LLVM.Module)
job = current_job::CompilerJob
changed = false
argtyps = Dict(
"f32" => Float32,
"f64" => Float64,
)
# have_fma
for f in functions(mod)
ft = function_type(f)
fn = LLVM.name(f)
startswith(fn, "julia.cpu.have_fma.") || continue
typnam = fn[20:end]  # strip the "julia.cpu.have_fma." prefix
# determine whether this back-end supports FMA on this type
has_fma = if haskey(argtyps, typnam)
typ = argtyps[typnam]
have_fma(job.config.target, typ)
else
# warn?
false
end
has_fma = ConstantInt(return_type(ft), has_fma)
# substitute all uses of the intrinsic with a constant
materialized = LLVM.Value[]
for use in uses(f)
val = user(use)
replace_uses!(val, has_fma)
push!(materialized, val)
end
# remove the intrinsic and its uses
for val in materialized
@assert isempty(uses(val))
erase!(val)
end
@assert isempty(uses(f))
erase!(f)
changed = true
end
return changed
end
CPUFeaturesPass() = NewPMModulePass("GPULowerCPUFeatures", cpu_features!)
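# for illustration: this pass folds intrinsic calls such as
#     %fma = call i1 @julia.cpu.have_fma.f64()
# into the constant `i1 1` or `i1 0`, depending on `have_fma(job.config.target, Float64)`.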
# lower object allocations to PTX malloc
#
# this is a PoC implementation that is very simple: allocate, and never free. it also runs
# _before_ Julia's GC lowering passes, so we don't get to use the results of its analyses.
# if we ever implement a more potent GC, we will need those results, but the relevant pass
# is currently very architecture/CPU specific: hard-coded pool sizes, TLS references, etc.
# such IR is hard to clean up, so we will probably need to have the GC lowering pass emit
# lower-level intrinsics which can then be lowered to architecture-specific code.
function lower_gc_frame!(fun::LLVM.Function)
job = current_job::CompilerJob
mod = LLVM.parent(fun)
changed = false
# plain alloc
if haskey(functions(mod), "julia.gc_alloc_obj")
alloc_obj = functions(mod)["julia.gc_alloc_obj"]
alloc_obj_ft = function_type(alloc_obj)
T_prjlvalue = return_type(alloc_obj_ft)
T_pjlvalue = convert(LLVMType, Any; allow_boxed=true)
for use in uses(alloc_obj)
call = user(use)::LLVM.CallInst
# decode the call
ops = arguments(call)
sz = ops[2]
# replace with PTX alloc_obj
@dispose builder=IRBuilder() begin
position!(builder, call)
ptr = call!(builder, Runtime.get(:gc_pool_alloc), [sz])
replace_uses!(call, ptr)
end
erase!(call)
changed = true
end
@compiler_assert isempty(uses(alloc_obj)) job
end
# we don't care about write barriers
if haskey(functions(mod), "julia.write_barrier")
barrier = functions(mod)["julia.write_barrier"]
for use in uses(barrier)
call = user(use)::LLVM.CallInst
erase!(call)
changed = true
end
@compiler_assert isempty(uses(barrier)) job
end
return changed
end
LowerGCFramePass() = NewPMFunctionPass("GPULowerGCFrame", lower_gc_frame!)
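# for illustration: this pass rewrites an allocation along the lines of
#     %obj = call @julia.gc_alloc_obj(%task, %sz, %tag)
# into a call to the `gc_pool_alloc` runtime function taking `%sz`, while calls to
# `julia.write_barrier` are erased outright.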
# lower the `julia.ptls_states` intrinsic by removing it, since it is GPU incompatible.
#
# this assumes and checks that the TLS is unused, which should be the case for most GPU code
# after lowering the GC intrinsics to TLS-less code and having run DCE.
#
# TODO: maybe don't have Julia emit actual uses of the TLS, but use intrinsics instead,
# making it easier to remove or reimplement that functionality here.
function lower_ptls!(mod::LLVM.Module)
job = current_job::CompilerJob
changed = false
intrinsic = "julia.get_pgcstack"
if haskey(functions(mod), intrinsic)
ptls_getter = functions(mod)[intrinsic]
for use in uses(ptls_getter)
val = user(use)
if isempty(uses(val))
erase!(val)
changed = true
else
# the validator will detect this
end
end
end
return changed
end
LowerPTLSPass() = NewPMModulePass("GPULowerPTLS", lower_ptls!)
# ------------------------------------------------------------------------------
using PrecompileTools: @setup_workload, @compile_workload
@setup_workload begin
precompile_module = @eval module $(gensym())
using ..GPUCompiler
module DummyRuntime
# dummy methods
signal_exception() = return
malloc(sz) = C_NULL
report_oom(sz) = return
report_exception(ex) = return
report_exception_name(ex) = return
report_exception_frame(idx, func, file, line) = return
end
struct DummyCompilerParams <: AbstractCompilerParams end
const DummyCompilerJob = CompilerJob{NativeCompilerTarget, DummyCompilerParams}
GPUCompiler.runtime_module(::DummyCompilerJob) = DummyRuntime
end
kernel() = nothing
@compile_workload begin
source = methodinstance(typeof(kernel), Tuple{})
target = NativeCompilerTarget()
params = precompile_module.DummyCompilerParams()
config = CompilerConfig(target, params)
job = CompilerJob(source, config)
JuliaContext() do ctx
# XXX: on Windows, compiling the GPU runtime leaks GPU code in the native cache,
# so prevent building the runtime library (see JuliaGPU/GPUCompiler.jl#601)
GPUCompiler.compile(:asm, job; libraries=false)
end
end
# reset state that was initialized during precompilation
__llvm_initialized[] = false
end
# ------------------------------------------------------------------------------
# implementation of the GPUCompiler interfaces for generating PTX code
## target
export PTXCompilerTarget
Base.@kwdef struct PTXCompilerTarget <: AbstractCompilerTarget
cap::VersionNumber
ptx::VersionNumber = v"6.0" # for compatibility with older versions of CUDA.jl
# codegen quirks
## can we emit debug info in the PTX assembly?
debuginfo::Bool = false
# optional properties
minthreads::Union{Nothing,Int,NTuple{<:Any,Int}} = nothing
maxthreads::Union{Nothing,Int,NTuple{<:Any,Int}} = nothing
blocks_per_sm::Union{Nothing,Int} = nothing
maxregs::Union{Nothing,Int} = nothing
fastmath::Bool = Base.JLOptions().fast_math == 1
# deprecated; remove with next major version
exitable::Union{Nothing,Bool} = nothing
unreachable::Union{Nothing,Bool} = nothing
end
function Base.hash(target::PTXCompilerTarget, h::UInt)
h = hash(target.cap, h)
h = hash(target.ptx, h)
h = hash(target.debuginfo, h)
h = hash(target.minthreads, h)
h = hash(target.maxthreads, h)
h = hash(target.blocks_per_sm, h)
h = hash(target.maxregs, h)
h = hash(target.fastmath, h)
h
end
source_code(target::PTXCompilerTarget) = "ptx"
llvm_triple(target::PTXCompilerTarget) = Int===Int64 ? "nvptx64-nvidia-cuda" : "nvptx-nvidia-cuda"
function llvm_machine(target::PTXCompilerTarget)
triple = llvm_triple(target)
t = Target(triple=triple)
tm = TargetMachine(t, triple, "sm_$(target.cap.major)$(target.cap.minor)",
"+ptx$(target.ptx.major)$(target.ptx.minor)")
asm_verbosity!(tm, true)
return tm
end
# the default datalayout does not match the one in the NVPTX user guide
llvm_datalayout(target::PTXCompilerTarget) =
# little endian
"e-" *
# on 32-bit systems, use 32-bit pointers.
# on 64-bit systems, use 64-bit pointers.
(Int === Int64 ? "p:64:64:64-" : "p:32:32:32-") *
# alignment of integer types
"i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-" *
# alignment of floating point types
"f32:32:32-f64:64:64-" *
# alignment of vector types
"v16:16:16-v32:32:32-v64:64:64-v128:128:128-" *
# native integer widths
"n16:32:64"
have_fma(@nospecialize(target::PTXCompilerTarget), T::Type) = true
dwarf_version(target::PTXCompilerTarget) = Int32(2) # CUDA only supports DWARF version 2
## job
function Base.show(io::IO, @nospecialize(job::CompilerJob{PTXCompilerTarget}))
print(io, "PTX CompilerJob of ", job.source)
print(io, " for sm_$(job.config.target.cap.major)$(job.config.target.cap.minor)")
job.config.target.minthreads !== nothing && print(io, ", minthreads=$(job.config.target.minthreads)")
job.config.target.maxthreads !== nothing && print(io, ", maxthreads=$(job.config.target.maxthreads)")
job.config.target.blocks_per_sm !== nothing && print(io, ", blocks_per_sm=$(job.config.target.blocks_per_sm)")
job.config.target.maxregs !== nothing && print(io, ", maxregs=$(job.config.target.maxregs)")
job.config.target.fastmath && print(io, ", fast math enabled")
end
const ptx_intrinsics = ("vprintf", "__assertfail", "malloc", "free")
isintrinsic(@nospecialize(job::CompilerJob{PTXCompilerTarget}), fn::String) =
in(fn, ptx_intrinsics)
# XXX: the debuginfo part should be handled by GPUCompiler as it applies to all back-ends.
runtime_slug(@nospecialize(job::CompilerJob{PTXCompilerTarget})) =
"ptx$(job.config.target.ptx.major)$(job.config.target.ptx.minor)" *
"-sm_$(job.config.target.cap.major)$(job.config.target.cap.minor)" *
"-debuginfo=$(Int(llvm_debug_info(job)))"
function finish_module!(@nospecialize(job::CompilerJob{PTXCompilerTarget}),
mod::LLVM.Module, entry::LLVM.Function)
# emit the device capability and ptx isa version as constants in the module. this makes
# it possible to 'query' these in device code, relying on LLVM to optimize the checks
# away and generate static code. note that we only do so if there's actual uses of these
# variables; unconditionally creating a gvar would result in duplicate declarations.
for (name, value) in ["sm_major" => job.config.target.cap.major,
"sm_minor" => job.config.target.cap.minor,
"ptx_major" => job.config.target.ptx.major,
"ptx_minor" => job.config.target.ptx.minor]
if haskey(globals(mod), name)
gv = globals(mod)[name]
initializer!(gv, ConstantInt(LLVM.Int32Type(), value))
# change the linkage so that we can inline the value
linkage!(gv, LLVM.API.LLVMPrivateLinkage)
end
end
# update calling convention
if LLVM.version() >= v"8"
for f in functions(mod)
# JuliaGPU/GPUCompiler.jl#97
#callconv!(f, LLVM.API.LLVMPTXDeviceCallConv)
end
end
if job.config.kernel && LLVM.version() >= v"8"
callconv!(entry, LLVM.API.LLVMPTXKernelCallConv)
end
if job.config.kernel
# work around bad byval codegen (JuliaGPU/GPUCompiler.jl#92)
entry = lower_byval(job, mod, entry)
end
# we emit properties (of the device and ptx isa) as private global constants,
# so run global optimization here to inline them before the main optimizer runs.
@dispose pb=NewPMPassBuilder() begin
add!(pb, RecomputeGlobalsAAPass())
add!(pb, GlobalOptPass())
run!(pb, mod, llvm_machine(job.config.target))
end
return entry
end
function optimize_module!(@nospecialize(job::CompilerJob{PTXCompilerTarget}),
mod::LLVM.Module)
tm = llvm_machine(job.config.target)
# TODO: Use the registered target passes (JuliaGPU/GPUCompiler.jl#450)
@dispose pb=NewPMPassBuilder() begin
register!(pb, NVVMReflectPass())
add!(pb, NewPMFunctionPassManager()) do fpm
# TODO: need to run this earlier; optimize_module! is called after addOptimizationPasses!
add!(fpm, NVVMReflectPass())
# needed by GemmKernels.jl-like code
add!(fpm, SpeculativeExecutionPass())
# NVPTX's target machine info enables runtime unrolling,
# but Julia's pass sequence only invokes the simple unroller.
add!(fpm, LoopUnrollPass(; job.config.opt_level))
add!(fpm, InstCombinePass()) # clean-up redundancy
add!(fpm, NewPMLoopPassManager(; use_memory_ssa=true)) do lpm
add!(lpm, LICMPass()) # the inner runtime check might be
# outer loop invariant
end
# the above loop unroll pass might have unrolled regular, non-runtime nested loops.
# that code still needs to be optimized (arguably, multiple unroll passes should be
# scheduled by the Julia optimizer). do so here, instead of re-optimizing entirely.
if job.config.opt_level == 2
add!(fpm, GVNPass())
elseif job.config.opt_level == 1
add!(fpm, EarlyCSEPass())
end
add!(fpm, DSEPass())
add!(fpm, SimplifyCFGPass())
end
# get rid of the internalized functions; they are now possibly unused
add!(pb, GlobalDCEPass())
run!(pb, mod, tm)
end
end
function finish_ir!(@nospecialize(job::CompilerJob{PTXCompilerTarget}),
mod::LLVM.Module, entry::LLVM.Function)
if LLVM.version() < v"17"
for f in functions(mod)
lower_unreachable!(f)
end
end
if job.config.kernel
# add metadata annotations for the assembler to the module
# property annotations
annotations = Metadata[entry]
## kernel metadata
append!(annotations, [MDString("kernel"),
ConstantInt(Int32(1))])
## expected CTA sizes
if job.config.target.minthreads !== nothing
for (dim, name) in enumerate([:x, :y, :z])
bound = dim <= length(job.config.target.minthreads) ? job.config.target.minthreads[dim] : 1
append!(annotations, [MDString("reqntid$name"),
ConstantInt(Int32(bound))])
end
end
if job.config.target.maxthreads !== nothing
for (dim, name) in enumerate([:x, :y, :z])
bound = dim <= length(job.config.target.maxthreads) ? job.config.target.maxthreads[dim] : 1
append!(annotations, [MDString("maxntid$name"),
ConstantInt(Int32(bound))])
end
end
if job.config.target.blocks_per_sm !== nothing
append!(annotations, [MDString("minctasm"),
ConstantInt(Int32(job.config.target.blocks_per_sm))])
end
if job.config.target.maxregs !== nothing
append!(annotations, [MDString("maxnreg"),
ConstantInt(Int32(job.config.target.maxregs))])
end
push!(metadata(mod)["nvvm.annotations"], MDNode(annotations))
end
return entry
end
function llvm_debug_info(@nospecialize(job::CompilerJob{PTXCompilerTarget}))
# allow overriding the debug info from CUDA.jl
if job.config.target.debuginfo
invoke(llvm_debug_info, Tuple{CompilerJob}, job)
else
LLVM.API.LLVMDebugEmissionKindNoDebug
end
end
## LLVM passes
# lower `unreachable` to `exit` so that the emitted PTX has correct control flow
#
# During back-end compilation, `ptxas` inserts instructions to manage the hardware's
# reconvergence stack (SSY and SYNC). In order to do so, it needs to identify
# divergent regions:
#
# entry:
# // start of divergent region
# @%p0 bra cont;
# ...
# bra.uni cont;
# cont:
# // end of divergent region
# bar.sync 0;
#
# Meanwhile, LLVM's branch-folder and block-placement MIR passes will try to optimize
# the block layout, e.g., by placing unlikely blocks at the end of the function:
#
# entry:
# // start of divergent region
# @%p0 bra cont;
# @%p1 bra unlikely;
# bra.uni cont;
# cont:
# // end of divergent region
# bar.sync 0;
# unlikely:
# bra.uni cont;
#
# That is not a problem as long as the unlikely block continues back into the
# divergent region. Crucially, this is not the case with unreachable control flow:
#
# entry:
# // start of divergent region
# @%p0 bra cont;
# @%p1 bra throw;
# bra.uni cont;
# cont:
# bar.sync 0;
# throw:
# call throw_and_trap();
# // unreachable
# exit:
# // end of divergent region
# ret;
#
# Dynamically, this is fine, because the called function does not return.
# However, `ptxas` does not know that and adds a successor edge to the `exit`
# block, widening the divergence range. In this example, that's not allowed, as
# `bar.sync` cannot be executed divergently on Pascal hardware or earlier.
#
# To avoid these fall-through successors that change the control flow,
# we replace `unreachable` instructions with a call to `trap` and `exit`. This
# informs `ptxas` that the thread exits, and allows it to correctly construct a
# CFG, and consequently correctly determine the divergence regions as intended.
# Note that we first emit a call to `trap`, so that the behaviour is the same
# as before.
function lower_unreachable!(f::LLVM.Function)
mod = LLVM.parent(f)
# TODO:
# - if unreachable blocks have been merged, we still may be jumping from different
# divergent regions, potentially causing the same problem as above:
# entry:
# // start of divergent region 1
# @%p0 bra cont1;
# @%p1 bra throw;
# bra.uni cont1;
# cont1:
# // end of divergent region 1
# bar.sync 0; // is this executed divergently?
# // start of divergent region 2
# @%p2 bra cont2;
# @%p3 bra throw;
# bra.uni cont2;
# cont2:
# // end of divergent region 2
# ...
# throw:
# trap;
# br throw;
# if this is a problem, we probably need to clone blocks with multiple
# predecessors so that there's a unique path from each region of
# divergence to every `unreachable` terminator
# remove `noreturn` attributes, to avoid the (minimal) optimization that
# happens during `prepare_execution!` undoing our work here.
# this shouldn't be needed when we upstream the pass.
attrs = function_attributes(f)
delete!(attrs, EnumAttribute("noreturn", 0))
# find unreachable blocks
unreachable_blocks = BasicBlock[]
for block in blocks(f)
if terminator(block) isa LLVM.UnreachableInst
push!(unreachable_blocks, block)
end
end
isempty(unreachable_blocks) && return false
# inline assembly to exit a thread
exit_ft = LLVM.FunctionType(LLVM.VoidType())
exit = InlineAsm(exit_ft, "exit;", "", true)
trap_ft = LLVM.FunctionType(LLVM.VoidType())
trap = if haskey(functions(mod), "llvm.trap")
functions(mod)["llvm.trap"]
else
LLVM.Function(mod, "llvm.trap", trap_ft)
end
# rewrite the unreachable terminators
@dispose builder=IRBuilder() begin
entry_block = first(blocks(f))
for block in unreachable_blocks
inst = terminator(block)
@assert inst isa LLVM.UnreachableInst
position!(builder, inst)
call!(builder, trap_ft, trap)
call!(builder, exit_ft, exit)
end
end
return true
end
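# after this pass, the `throw` block from the example above ends in explicit
#     trap;
#     exit;
# instructions, so `ptxas` no longer adds a fall-through successor edge that would
# widen the divergent region.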
# Replace occurrences of __nvvm_reflect("foo") and llvm.nvvm.reflect with an integer.
#
# NOTE: this is the same as LLVM's NVVMReflect pass, which we cannot use because it is
# not exported. It is meant to be added to a pass pipeline automatically, by
# calling adjustPassManager, but we don't use a PassManagerBuilder so cannot do so.
const NVVM_REFLECT_FUNCTION = "__nvvm_reflect"
function nvvm_reflect!(fun::LLVM.Function)
job = current_job::CompilerJob
mod = LLVM.parent(fun)
changed = false
@timeit_debug to "nvvmreflect" begin
# find and sanity-check the nvvm-reflect function
# TODO: also handle the llvm.nvvm.reflect intrinsic
haskey(LLVM.functions(mod), NVVM_REFLECT_FUNCTION) || return false
reflect_function = functions(mod)[NVVM_REFLECT_FUNCTION]
isdeclaration(reflect_function) || error("_reflect function should not have a body")
reflect_typ = return_type(function_type(reflect_function))
isa(reflect_typ, LLVM.IntegerType) || error("_reflect's return type should be integer")
to_remove = []
for use in uses(reflect_function)
call = user(use)
isa(call, LLVM.CallInst) || continue
if length(operands(call)) != 2
@error """Unrecognized format of __nvvm_reflect call:
$(string(call))
Wrong number of operands: expected 2, got $(length(operands(call)))."""
continue
end
# decode the string argument
if LLVM.version() >= v"17"
sym = operands(call)[1]
else
str = operands(call)[1]
if !isa(str, LLVM.ConstantExpr) || opcode(str) != LLVM.API.LLVMGetElementPtr
@safe_error """Unrecognized format of __nvvm_reflect call:
$(string(call))
Operand should be a GEP instruction, got a $(typeof(str)). Please file an issue."""
continue
end
sym = operands(str)[1]
if isa(sym, LLVM.ConstantExpr) && opcode(sym) == LLVM.API.LLVMGetElementPtr
# CUDA 11.0 or below
sym = operands(sym)[1]
end
end
if !isa(sym, LLVM.GlobalVariable)
@safe_error """Unrecognized format of __nvvm_reflect call:
$(string(call))
Operand should be a global variable, got a $(typeof(sym)). Please file an issue."""
continue
end
sym_op = operands(sym)[1]
if !isa(sym_op, LLVM.ConstantArray) && !isa(sym_op, LLVM.ConstantDataArray)
@safe_error """Unrecognized format of __nvvm_reflect call:
$(string(call))
Operand should be a constant array, got a $(typeof(sym_op)). Please file an issue."""
continue
end
chars = convert.(Ref(UInt8), collect(sym_op))
reflect_arg = String(chars[1:end-1])
# handle possible cases
# XXX: put some of these property in the compiler job?
# and/or first set the "nvvm-reflect-*" module flag like Clang does?
fast_math = current_job.config.target.fastmath
# NOTE: we follow nvcc's --use_fast_math
reflect_val = if reflect_arg == "__CUDA_FTZ"
# single-precision denormals support
ConstantInt(reflect_typ, fast_math ? 1 : 0)
elseif reflect_arg == "__CUDA_PREC_DIV"
# single-precision floating-point division and reciprocals.
ConstantInt(reflect_typ, fast_math ? 0 : 1)
elseif reflect_arg == "__CUDA_PREC_SQRT"
# single-precision floating point square roots.
ConstantInt(reflect_typ, fast_math ? 0 : 1)
elseif reflect_arg == "__CUDA_FMAD"
# contraction of floating-point multiplies and adds/subtracts into
# floating-point multiply-add operations (FMAD, FFMA, or DFMA)
ConstantInt(reflect_typ, fast_math ? 1 : 0)
elseif reflect_arg == "__CUDA_ARCH"
ConstantInt(reflect_typ, job.config.target.cap.major*100 + job.config.target.cap.minor*10)
else
@safe_error """Unrecognized format of __nvvm_reflect call:
$(string(call))
Unknown argument $reflect_arg. Please file an issue."""
continue
end
replace_uses!(call, reflect_val)
push!(to_remove, call)
changed = true
end
# remove the calls to the function
for val in to_remove
@assert isempty(uses(val))
erase!(val)
end
# maybe also delete the function
if isempty(uses(reflect_function))
erase!(reflect_function)
end
end
return changed
end
NVVMReflectPass() = NewPMFunctionPass("custom-nvvm-reflect", nvvm_reflect!)
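# for illustration: with `fastmath=true`, a device-library query such as
#     %1 = call i32 @__nvvm_reflect(i8* getelementptr (... @"__CUDA_FTZ" ...))
# folds to `i32 1`, and a `__CUDA_ARCH` query folds to e.g. `i32 750` for sm_75.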
# ------------------------------------------------------------------------------
import InteractiveUtils
using UUIDs
const Cthulhu = Base.PkgId(UUID("f68482b8-f384-11e8-15f7-abe071a5a75f"), "Cthulhu")
#
# syntax highlighting
#
const _pygmentize = Ref{Union{String,Nothing}}()
function pygmentize()
if !isassigned(_pygmentize)
_pygmentize[] = Sys.which("pygmentize")
end
return _pygmentize[]
end
function highlight(io::IO, code, lexer)
highlighter = pygmentize()
have_color = get(io, :color, false)
if !have_color
print(io, code)
elseif lexer == "llvm"
InteractiveUtils.print_llvm(io, code)
elseif highlighter !== nothing
custom_lexer = joinpath(dirname(@__DIR__), "res", "pygments", "$lexer.py")
if isfile(custom_lexer)
lexer = `$custom_lexer -x`
end
pipe = open(`$highlighter -f terminal -P bg=dark -l $lexer`, "r+")
print(pipe, code)
close(pipe.in)
print(io, read(pipe, String))
else
print(io, code)
end
return
end
#
# Compat shims
#
include("reflection_compat.jl")
#
# code_* replacements
#
function code_lowered(@nospecialize(job::CompilerJob); kwargs...)
sig = job.source.specTypes # XXX: can we just use the method instance?
code_lowered_by_type(sig; kwargs...)
end
function code_typed(@nospecialize(job::CompilerJob); interactive::Bool=false, kwargs...)
sig = job.source.specTypes # XXX: can we just use the method instance?
if interactive
# call Cthulhu without introducing a dependency on Cthulhu
mod = get(Base.loaded_modules, Cthulhu, nothing)
mod===nothing && error("Interactive code reflection requires Cthulhu; please install and load this package first.")
interp = get_interpreter(job)
descend_code_typed = getfield(mod, :descend_code_typed)
descend_code_typed(sig; interp, kwargs...)
else
interp = get_interpreter(job)
Base.code_typed_by_type(sig; interp, kwargs...)
end
end
function code_warntype(io::IO, @nospecialize(job::CompilerJob); interactive::Bool=false, kwargs...)
sig = job.source.specTypes # XXX: can we just use the method instance?
if interactive
@assert io == stdout
# call Cthulhu without introducing a dependency on Cthulhu
mod = get(Base.loaded_modules, Cthulhu, nothing)
mod===nothing && error("Interactive code reflection requires Cthulhu; please install and load this package first.")
interp = get_interpreter(job)
descend_code_warntype = getfield(mod, :descend_code_warntype)
descend_code_warntype(sig; interp, kwargs...)
else
interp = get_interpreter(job)
code_warntype_by_type(io, sig; interp, kwargs...)
end
end
code_warntype(@nospecialize(job::CompilerJob); kwargs...) = code_warntype(stdout, job; kwargs...)
InteractiveUtils.code_lowered(err::InvalidIRError; kwargs...) = code_lowered(err.job; kwargs...)
InteractiveUtils.code_typed(err::InvalidIRError; kwargs...) = code_typed(err.job; kwargs...)
InteractiveUtils.code_warntype(err::InvalidIRError; kwargs...) = code_warntype(err.job; kwargs...)
InteractiveUtils.code_lowered(err::KernelError; kwargs...) = code_lowered(err.job; kwargs...)
InteractiveUtils.code_typed(err::KernelError; kwargs...) = code_typed(err.job; kwargs...)
InteractiveUtils.code_warntype(err::KernelError; kwargs...) = code_warntype(err.job; kwargs...)
struct jl_llvmf_dump
TSM::LLVM.API.LLVMOrcThreadSafeModuleRef
F::LLVM.API.LLVMValueRef
end
"""
code_llvm([io], job; optimize=true, raw=false, dump_module=false)
Prints the device LLVM IR generated for the given compiler job to `io` (default `stdout`).
The following keyword arguments are supported:
- `optimize`: determines if the code is optimized, which includes kernel-specific
optimizations if `kernel` is true
- `raw`: return the raw IR including all metadata
- `dump_module`: display the entire module instead of just the function
See also: [`@device_code_llvm`](@ref), `InteractiveUtils.code_llvm`
"""
function code_llvm(io::IO, @nospecialize(job::CompilerJob); optimize::Bool=true, raw::Bool=false,
debuginfo::Symbol=:default, dump_module::Bool=false, kwargs...)
# NOTE: jl_dump_function_ir supports stripping metadata, so don't do it in the driver
str = JuliaContext() do ctx
ir, meta = compile(:llvm, job; optimize=optimize, strip=false, validate=false, kwargs...)
ts_mod = ThreadSafeModule(ir)
entry_fn = meta.entry
GC.@preserve ts_mod entry_fn begin
value = Ref(jl_llvmf_dump(ts_mod.ref, entry_fn.ref))
ccall(:jl_dump_function_ir, Ref{String},
(Ptr{jl_llvmf_dump}, Bool, Bool, Ptr{UInt8}),
value, !raw, dump_module, debuginfo)
end
end
highlight(io, str, "llvm")
end
code_llvm(@nospecialize(job::CompilerJob); kwargs...) = code_llvm(stdout, job; kwargs...)
"""
code_native([io], job; raw=false, dump_module=false)
Prints the native assembly generated for the given compiler job to `io` (default `stdout`).
The following keyword arguments are supported:
- `raw`: return the raw code including all metadata
- `dump_module`: display the entire module instead of just the entry point
See also: [`@device_code_native`](@ref), `InteractiveUtils.code_native`
"""
function code_native(io::IO, @nospecialize(job::CompilerJob); raw::Bool=false, dump_module::Bool=false)
asm, meta = JuliaContext() do ctx
compile(:asm, job; strip=!raw, only_entry=!dump_module, validate=false)
end
highlight(io, asm, source_code(job.config.target))
end
code_native(@nospecialize(job::CompilerJob); kwargs...) =
code_native(stdout, job; kwargs...)
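# usage sketch (assuming a fully configured `job::CompilerJob`, e.g. constructed as in
# the precompilation workload earlier):
#
#   GPUCompiler.code_llvm(stdout, job; dump_module=true)
#   GPUCompiler.code_native(stdout, job; raw=true)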
#
# @device_code_* functions
#
function emit_hooked_compilation(inner_hook, ex...)
user_code = ex[end]
user_kwargs = ex[1:end-1]
quote
# we only want to invoke the hook once for every compilation job
jobs = Set()
function outer_hook(job)
if !in(job, jobs)
# the user hook might invoke the compiler again, so disable the hook
old_hook = $compile_hook[]
try
$compile_hook[] = nothing
$inner_hook(job; $(map(esc, user_kwargs)...))
finally
$compile_hook[] = old_hook
end
push!(jobs, job)
end
end
# now invoke the user code with this hook in place
try
$compile_hook[] = outer_hook
$(esc(user_code))
finally
$compile_hook[] = nothing
end
if isempty(jobs)
error("no kernels executed while evaluating the given expression")
end
nothing
end
end
"""
@device_code_lowered ex
Evaluates the expression `ex` and returns the result of
`InteractiveUtils.code_lowered` for every compiled GPU kernel.
See also: `InteractiveUtils.@code_lowered`
"""
macro device_code_lowered(ex...)
quote
buf = Any[]
function hook(job::CompilerJob)
append!(buf, code_lowered(job))
end
$(emit_hooked_compilation(:hook, ex...))
buf
end
end
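# usage sketch (the launch macro is hypothetical; downstream packages provide their own):
#
#   buf = @device_code_lowered @launch kernel(args...)
#
# every `CompilerJob` triggered while evaluating the expression appends its lowered
# code to `buf`.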
"""
@device_code_typed ex
Evaluates the expression `ex` and returns the result of
`InteractiveUtils.code_typed` for every compiled GPU kernel.
See also: `InteractiveUtils.@code_typed`
"""
macro device_code_typed(ex...)
quote
output = Dict{CompilerJob,Any}()
function hook(job::CompilerJob; kwargs...)
output[job] = code_typed(job; kwargs...)
end
$(emit_hooked_compilation(:hook, ex...))
output
end
end
"""
@device_code_warntype [io::IO=stdout] ex
Evaluates the expression `ex` and prints the result of
`InteractiveUtils.code_warntype` to `io` for every compiled GPU kernel.
See also: `InteractiveUtils.@code_warntype`
"""
macro device_code_warntype(ex...)
function hook(job::CompilerJob; io::IO=stdout, kwargs...)
println(io, "$job")
println(io)
code_warntype(io, job; kwargs...)
end
emit_hooked_compilation(hook, ex...)
end
"""
@device_code_llvm [io::IO=stdout, ...] ex
Evaluates the expression `ex` and prints the result of `InteractiveUtils.code_llvm`
to `io` for every compiled GPU kernel. For other supported keywords, see
[`GPUCompiler.code_llvm`](@ref).
See also: `InteractiveUtils.@code_llvm`
"""
macro device_code_llvm(ex...)
function hook(job::CompilerJob; io::IO=stdout, kwargs...)
println(io, "; $job")
code_llvm(io, job; kwargs...)
end
emit_hooked_compilation(hook, ex...)
end
"""
@device_code_native [io::IO=stdout, ...] ex
Evaluates the expression `ex` and prints the result of [`GPUCompiler.code_native`](@ref) to `io`
for every compiled GPU kernel. For other supported keywords, see
[`GPUCompiler.code_native`](@ref).
"""
macro device_code_native(ex...)
function hook(job::CompilerJob; io::IO=stdout, kwargs...)
println(io, "// $job")
println(io)
code_native(io, job; kwargs...)
end
emit_hooked_compilation(hook, ex...)
end
"""
@device_code dir::AbstractString=... [...] ex
Evaluates the expression `ex` and dumps all intermediate forms of code to the directory
`dir`.
"""
macro device_code(ex...)
localUnique = 1
function hook(job::CompilerJob; dir::AbstractString)
name = job.source.def.name
fn = "$(name)_$(localUnique)"
mkpath(dir)
open(joinpath(dir, "$fn.lowered.jl"), "w") do io
code = only(code_lowered(job))
println(io, code)
end
open(joinpath(dir, "$fn.typed.jl"), "w") do io
code = only(code_typed(job; debuginfo=:source))
println(io, code)
end
open(joinpath(dir, "$fn.unopt.ll"), "w") do io
code_llvm(io, job; dump_module=true, raw=true, optimize=false)
end
open(joinpath(dir, "$fn.opt.ll"), "w") do io
code_llvm(io, job; dump_module=true, raw=true)
end
open(joinpath(dir, "$fn.asm"), "w") do io
code_native(io, job; dump_module=true, raw=true)
end
localUnique += 1
end
emit_hooked_compilation(hook, ex...)
end
# ------------------------------------------------------------------------------
# The content of this file should be upstreamed to Julia proper
using InteractiveUtils: highlighting
using Base: hasgenerator
function method_instances(@nospecialize(tt::Type), world::UInt=Base.get_world_counter())
return map(Core.Compiler.specialize_method, method_matches(tt; world))
end
function code_lowered_by_type(@nospecialize(tt); generated::Bool=true, debuginfo::Symbol=:default)
debuginfo = Base.IRShow.debuginfo(debuginfo)
if debuginfo !== :source && debuginfo !== :none
throw(ArgumentError("'debuginfo' must be either :source or :none"))
end
return map(method_instances(tt)) do m
if generated && hasgenerator(m)
if Base.may_invoke_generator(m)
return ccall(:jl_code_for_staged, Any, (Any,), m)::CodeInfo
else
error("Could not expand generator for `@generated` method ", m, ". ",
"This can happen if the provided argument types (", t, ") are ",
"not leaf types, but the `generated` argument is `true`.")
end
end
code = Base.uncompressed_ir(m.def::Method)
debuginfo === :none && Base.remove_linenums!(code)
return code
end
end
function code_warntype_by_type(io::IO, @nospecialize(tt);
debuginfo::Symbol=:default, optimize::Bool=false, kwargs...)
debuginfo = Base.IRShow.debuginfo(debuginfo)
lineprinter = Base.IRShow.__debuginfo[debuginfo]
for (src, rettype) in Base.code_typed_by_type(tt; optimize, kwargs...)
if !(src isa Core.CodeInfo)
println(io, src)
println(io, " failed to infer")
continue
end
lambda_io::IOContext = io
p = src.parent
nargs::Int = 0
if p isa Core.MethodInstance
println(io, p)
print(io, " from ")
println(io, p.def)
p.def isa Method && (nargs = p.def.nargs)
if !isempty(p.sparam_vals)
println(io, "Static Parameters")
sig = p.def.sig
warn_color = Base.warn_color() # milder user notification
for i = 1:length(p.sparam_vals)
sig = sig::UnionAll
name = sig.var.name
val = p.sparam_vals[i]
print_highlighted(io::IO, v::String, color::Symbol) =
if highlighting[:warntype]
Base.printstyled(io, v; color)
else
Base.print(io, v)
end
if val isa TypeVar
if val.lb === Union{}
print(io, " ", name, " <: ")
print_highlighted(io, "$(val.ub)", warn_color)
elseif val.ub === Any
print(io, " ", sig.var.name, " >: ")
print_highlighted(io, "$(val.lb)", warn_color)
else
print(io, " ")
print_highlighted(io, "$(val.lb)", warn_color)
print(io, " <: ", sig.var.name, " <: ")
print_highlighted(io, "$(val.ub)", warn_color)
end
elseif val isa typeof(Vararg)
print(io, " ", name, "::")
print_highlighted(io, "Int", warn_color)
else
print(io, " ", sig.var.name, " = ")
print_highlighted(io, "$(val)", :cyan) # show the "good" type
end
println(io)
sig = sig.body
end
end
end
if src.slotnames !== nothing
slotnames = Base.sourceinfo_slotnames(src)
lambda_io = IOContext(lambda_io, :SOURCE_SLOTNAMES => slotnames)
slottypes = src.slottypes
nargs > 0 && println(io, "Arguments")
for i = 1:length(slotnames)
if i == nargs + 1
println(io, "Locals")
end
print(io, " ", slotnames[i])
if isa(slottypes, Vector{Any})
InteractiveUtils.warntype_type_printer(io; type=slottypes[i], used=true)
end
println(io)
end
end
print(io, "Body")
InteractiveUtils.warntype_type_printer(io; type=rettype, used=true)
println(io)
irshow_config = Base.IRShow.IRShowConfig(lineprinter(src), InteractiveUtils.warntype_type_printer)
Base.IRShow.show_ir(lambda_io, src, irshow_config)
println(io)
end
nothing
end
# ------------------------------------------------------------------------------
# compiler support for working with run-time libraries
link_library!(mod::LLVM.Module, lib::LLVM.Module) = link_library!(mod, [lib])
function link_library!(mod::LLVM.Module, libs::Vector{LLVM.Module})
# linking is destructive, so copy the libraries
libs = [copy(lib) for lib in libs]
for lib in libs
link!(mod, lib)
end
end
#
# GPU run-time library
#
## higher-level functionality to work with runtime functions
function LLVM.call!(builder, rt::Runtime.RuntimeMethodInstance, args=LLVM.Value[])
bb = position(builder)
f = LLVM.parent(bb)
mod = LLVM.parent(f)
# get or create a function prototype
if haskey(functions(mod), rt.llvm_name)
f = functions(mod)[rt.llvm_name]
ft = function_type(f)
else
ft = convert(LLVM.FunctionType, rt)
f = LLVM.Function(mod, rt.llvm_name, ft)
end
if !isdeclaration(f) && (rt.name !== :gc_pool_alloc && rt.name !== :report_exception)
# XXX: uses of the gc_pool_alloc intrinsic can be introduced _after_ the runtime
# is linked, as part of the lower_gc_frame! optimization pass.
# XXX: report_exception can also be used after the runtime is linked during
# CUDA/Enzyme nested compilation
error("Calling an intrinsic function that clashes with an existing definition: ",
string(ft), " ", rt.name)
end
# runtime functions are written in Julia, while we're calling from LLVM,
# this often results in argument type mismatches. try to fix some here.
args = LLVM.Value[args...]
if length(args) != length(parameters(ft))
error("Incorrect number of arguments for runtime function: ",
"passing ", length(args), " argument(s) to '", string(ft), " ", rt.name, "'")
end
for (i,arg) in enumerate(args)
if value_type(arg) != parameters(ft)[i]
if (value_type(arg) isa LLVM.PointerType) &&
(parameters(ft)[i] isa LLVM.IntegerType)
# Julia pointers are passed as integers
args[i] = ptrtoint!(builder, args[i], parameters(ft)[i])
else
error("Don't know how to convert ", arg, " argument to ", parameters(ft)[i])
end
end
end
call!(builder, ft, f, args)
end
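# usage sketch, from inside an LLVM pass with a `builder::IRBuilder` positioned at the
# call site (this mirrors the rewrite in the GC lowering pass):
#
#   ptr = call!(builder, Runtime.get(:gc_pool_alloc), [sz])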
## functionality to build the runtime library
function emit_function!(mod, config::CompilerConfig, f, method)
tt = Base.to_tuple_type(method.types)
source = generic_methodinstance(f, tt)
new_mod, meta = codegen(:llvm, CompilerJob(source, config); toplevel=false)
ft = function_type(meta.entry)
expected_ft = convert(LLVM.FunctionType, method)
if return_type(ft) != return_type(expected_ft)
error("Invalid return type for runtime function '$(method.name)': expected $(return_type(expected_ft)), got $(return_type(ft))")
end
# recent Julia versions include prototypes for all runtime functions, even if unused
run!(StripDeadPrototypesPass(), new_mod, llvm_machine(config.target))
temp_name = LLVM.name(meta.entry)
link!(mod, new_mod)
entry = functions(mod)[temp_name]
# if a declaration already existed, replace it with the function to avoid aliasing
# (and getting function names like gpu_signal_exception1)
name = method.llvm_name
if haskey(functions(mod), name)
decl = functions(mod)[name]
@assert value_type(decl) == value_type(entry)
replace_uses!(decl, entry)
erase!(decl)
end
LLVM.name!(entry, name)
end
function build_runtime(@nospecialize(job::CompilerJob))
mod = LLVM.Module("GPUCompiler run-time library")
# the compiler job passed in here identifies the code that requires the runtime.
# derive a job that represents the runtime itself (notably with kernel=false).
config = CompilerConfig(job.config; kernel=false)
for method in values(Runtime.methods)
def = if isa(method.def, Symbol)
isdefined(runtime_module(job), method.def) || continue
getfield(runtime_module(job), method.def)
else
method.def
end
emit_function!(mod, config, typeof(def), method)
end
# we cannot optimize the runtime library, because the code would then be optimized again
# during main compilation (and optimizing twice isn't safe). for example, optimization
# removes Julia address spaces, which would then lead to type mismatches when using
# functions from the runtime library from IR that has not been stripped of AS info.
mod
end
const runtime_lock = ReentrantLock()
@locked function load_runtime(@nospecialize(job::CompilerJob))
global compile_cache
if compile_cache === nothing # during precompilation
return build_runtime(job)
end
lock(runtime_lock) do
slug = runtime_slug(job)
if !supports_typed_pointers(context())
slug *= "-opaque"
end
name = "runtime_$(slug).bc"
path = joinpath(compile_cache, name)
lib = try
if ispath(path)
open(path) do io
parse(LLVM.Module, read(io))
end
end
catch ex
@warn "Failed to load GPU runtime library at $path" exception=(ex, catch_backtrace())
nothing
end
if lib === nothing
@debug "Building the GPU runtime library at $path"
mkpath(compile_cache)
lib = build_runtime(job)
# atomic write to disk
temp_path, io = mktemp(dirname(path); cleanup=false)
write(io, lib)
close(io)
@static if VERSION >= v"1.12.0-DEV.1023"
mv(temp_path, path; force=true)
else
Base.rename(temp_path, path, force=true)
end
end
return lib
end
end
# remove the existing cache
# NOTE: call this function from global scope, so any change triggers recompilation.
function reset_runtime()
lock(runtime_lock) do
rm(compile_cache; recursive=true, force=true)
end
return
end
# ------------------------------------------------------------------------------
# GPU runtime library
#
# This module defines method instances that will be compiled into a target-specific image
# and will be available to the GPU compiler to call after Julia has generated code.
#
# Most functions implement, or are used to support, Julia runtime functions that the Julia
# compiler expects to be available at run time, e.g., to dynamically allocate memory,
# box values, etc.
module Runtime
using ..GPUCompiler
using LLVM
using LLVM.Interop
## representation of a runtime method instance
struct RuntimeMethodInstance
# either a function defined here, or a symbol to fetch a target-specific definition
def::Union{Function,Symbol}
return_type::Type
types::Tuple
name::Symbol
# LLVM types cannot be cached, so we can't put them in the runtime method instance.
# the actual types are constructed upon accessing them, based on a sentinel value:
# - nothing: construct the LLVM type based on its Julia counterparts
# - function: call this generator to get the type (when more control is needed)
llvm_return_type::Union{Nothing, Function}
llvm_types::Union{Nothing, Function}
llvm_name::String
end
function Base.convert(::Type{LLVM.FunctionType}, rt::RuntimeMethodInstance)
types = if rt.llvm_types === nothing
LLVMType[convert(LLVMType, typ; allow_boxed=true) for typ in rt.types]
else
rt.llvm_types()
end
return_type = if rt.llvm_return_type === nothing
convert(LLVMType, rt.return_type; allow_boxed=true)
else
rt.llvm_return_type()
end
LLVM.FunctionType(return_type, types)
end
const methods = Dict{Symbol,RuntimeMethodInstance}()
function get(name::Symbol)
methods[name]
end
# Register a Julia function `def` as a runtime library function identified by `name`. The
# function will be compiled upon first use for argument types `types` and should return
# `return_type`. Use `Runtime.get(name)` to get a reference to this method instance.
#
# The corresponding LLVM types `llvm_types` and `llvm_return_type` will be deduced from
# their Julia counterparts. To influence that conversion, pass a callable object instead;
# this object will be evaluated at run-time and the returned value will be used instead.
#
# When generating multiple runtime functions from a single definition, make sure to specify
# different values for `name`. The LLVM function name will be deduced from that name, but
# you can always specify `llvm_name` to influence that. Never use an LLVM name that starts
# with `julia_` or the function might clash with other compiled functions.
function compile(def, return_type, types, llvm_return_type=nothing, llvm_types=nothing;
name=isa(def,Symbol) ? def : nameof(def), llvm_name="gpu_$name")
meth = RuntimeMethodInstance(def,
return_type, types, name,
llvm_return_type, llvm_types, llvm_name)
if haskey(methods, name)
error("Runtime function $name has already been registered!")
end
methods[name] = meth
# FIXME: if the function is a symbol, implying it will be specified by the target,
# we won't be able to call this function here or we'll get UndefVarErrors.
# work around that by generating an llvmcall stub. can we do better by
# using the new nonrecursive codegen to handle function lookup ourselves?
if def isa Symbol
args = [gensym() for typ in types]
@eval @inline $def($(args...)) =
ccall($("extern $llvm_name"), llvmcall, $return_type, ($(types...),), $(args...))
end
return
end
## exception handling
# expected functions for exception signalling
compile(:signal_exception, Nothing, ())
# expected functions for simple exception handling
compile(:report_exception, Nothing, (Ptr{Cchar},))
compile(:report_oom, Nothing, (Csize_t,))
# expected functions for verbose exception handling
compile(:report_exception_frame, Nothing, (Cint, Ptr{Cchar}, Ptr{Cchar}, Cint))
compile(:report_exception_name, Nothing, (Ptr{Cchar},))
# NOTE: no throw functions are provided here, but replaced by an LLVM pass instead
# in order to provide some debug information without stack unwinding.
## GC
# FIXME: get rid of this and allow boxed types
T_prjlvalue() = convert(LLVMType, Any; allow_boxed=true)
function gc_pool_alloc(sz::Csize_t)
ptr = malloc(sz)
if ptr == C_NULL
report_oom(sz)
throw(OutOfMemoryError())
end
return unsafe_pointer_to_objref(ptr)
end
compile(gc_pool_alloc, Any, (Csize_t,), T_prjlvalue)
# expected functions for GC support
compile(:malloc, Ptr{Nothing}, (Csize_t,))
## boxing and unboxing
const tag_type = UInt
const tag_size = sizeof(tag_type)
const gc_bits = 0x3 # FIXME
# get the type tag of a type at run-time
@generated function type_tag(::Val{type_name}) where type_name
@dispose ctx=Context() begin
T_tag = convert(LLVMType, tag_type)
T_ptag = LLVM.PointerType(T_tag)
T_pjlvalue = convert(LLVMType, Any; allow_boxed=true)
# create function
llvm_f, _ = create_function(T_tag)
mod = LLVM.parent(llvm_f)
# this isn't really a function, but we abuse it to get the JIT to resolve the address
typ = LLVM.Function(mod, "jl_" * String(type_name) * "_type",
LLVM.FunctionType(T_pjlvalue))
# generate IR
@dispose builder=IRBuilder() begin
entry = BasicBlock(llvm_f, "entry")
position!(builder, entry)
typ_var = bitcast!(builder, typ, T_ptag)
tag = load!(builder, T_tag, typ_var)
ret!(builder, tag)
end
call_function(llvm_f, tag_type)
end
end
# we use `jl_value_ptr`, a Julia pseudo-intrinsic that can be used to box and unbox values
@inline @generated function box(val, ::Val{type_name}) where type_name
sz = sizeof(val)
allocsz = sz + tag_size
# type-tags are ephemeral, so look them up at run time
#tag = unsafe_load(convert(Ptr{tag_type}, type_name))
tag = :( type_tag(Val(type_name)) )
quote
ptr = malloc($(Csize_t(allocsz)))
# store the type tag
ptr = convert(Ptr{tag_type}, ptr)
Core.Intrinsics.pointerset(ptr, $tag | $gc_bits, #=index=# 1, #=align=# $tag_size)
# store the value
ptr = convert(Ptr{$val}, ptr+tag_size)
Core.Intrinsics.pointerset(ptr, val, #=index=# 1, #=align=# $sz)
unsafe_pointer_to_objref(ptr)
end
end
@inline function unbox(obj, ::Type{T}) where T
ptr = ccall(:jl_value_ptr, Ptr{Cvoid}, (Any,), obj)
# load the value
ptr = convert(Ptr{T}, ptr)
Core.Intrinsics.pointerref(ptr, #=index=# 1, #=align=# sizeof(T))
end
# generate box/unbox functions that mirror those in the Julia runtime (see julia/src/datatype.c)
for (T, t) in [Int8 => :int8, Int16 => :int16, Int32 => :int32, Int64 => :int64,
UInt8 => :uint8, UInt16 => :uint16, UInt32 => :uint32, UInt64 => :uint64,
Bool => :bool, Float32 => :float32, Float64 => :float64]
box_fn = Symbol("box_$t")
unbox_fn = Symbol("unbox_$t")
@eval begin
$box_fn(val) = box($T(val), Val($(QuoteNode(t))))
$unbox_fn(obj) = unbox(obj, $T)
compile($box_fn, Any, ($T,), T_prjlvalue; llvm_name=$"ijl_$box_fn")
compile($unbox_fn, $T, (Any,); llvm_name=$"ijl_$unbox_fn")
end
end
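# for example, the generated `box_int64(val)` allocates `sizeof(Int64) + tag_size` bytes,
# stores the run-time type tag followed by the value, and returns an object reference;
# `unbox_int64(obj)` reads the value back through `jl_value_ptr`.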
end
# ------------------------------------------------------------------------------
# implementation of the GPUCompiler interfaces for generating SPIR-V code
# https://github.com/llvm/llvm-project/blob/master/clang/lib/Basic/Targets/SPIR.h
# https://github.com/KhronosGroup/LLVM-SPIRV-Backend/blob/master/llvm/docs/SPIR-V-Backend.rst
# https://github.com/KhronosGroup/SPIRV-LLVM-Translator/blob/master/docs/SPIRVRepresentationInLLVM.rst
const SPIRV_LLVM_Translator_unified_jll = LazyModule("SPIRV_LLVM_Translator_unified_jll", UUID("85f0d8ed-5b39-5caa-b1ae-7472de402361"))
const SPIRV_Tools_jll = LazyModule("SPIRV_Tools_jll", UUID("6ac6d60f-d740-5983-97d7-a4482c0689f4"))
## target
export SPIRVCompilerTarget
Base.@kwdef struct SPIRVCompilerTarget <: AbstractCompilerTarget
extensions::Vector{String} = []
supports_fp16::Bool = true
supports_fp64::Bool = true
end
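# construction sketch (the extension name is only an example):
#
#   target = SPIRVCompilerTarget(; extensions=["SPV_EXT_shader_atomic_float_add"],
#                                  supports_fp64=false)
#
# disabling `supports_fp64` makes `validate_ir` (below) flag any use of `double`.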
llvm_triple(::SPIRVCompilerTarget) = Int===Int64 ? "spir64-unknown-unknown" : "spirv-unknown-unknown"
# SPIRV is not supported by our LLVM builds, so we can't get a target machine
llvm_machine(::SPIRVCompilerTarget) = nothing
llvm_datalayout(::SPIRVCompilerTarget) = Int===Int64 ?
"e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024" :
"e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024"
## job
# TODO: encode debug build or not in the compiler job
# https://github.com/JuliaGPU/CUDAnative.jl/issues/368
runtime_slug(job::CompilerJob{SPIRVCompilerTarget}) = "spirv"
function finish_module!(job::CompilerJob{SPIRVCompilerTarget}, mod::LLVM.Module, entry::LLVM.Function)
# update calling convention
for f in functions(mod)
# JuliaGPU/GPUCompiler.jl#97
#callconv!(f, LLVM.API.LLVMSPIRFUNCCallConv)
end
if job.config.kernel
callconv!(entry, LLVM.API.LLVMSPIRKERNELCallConv)
end
# HACK: Intel's compute runtime doesn't properly support SPIR-V's byval attribute.
# they do support struct byval, for OpenCL, so wrap byval parameters in a struct.
if job.config.kernel
entry = wrap_byval(job, mod, entry)
end
# add module metadata
## OpenCL 2.0
push!(metadata(mod)["opencl.ocl.version"],
MDNode([ConstantInt(Int32(2)),
ConstantInt(Int32(0))]))
## SPIR-V 1.5
push!(metadata(mod)["opencl.spirv.version"],
MDNode([ConstantInt(Int32(1)),
ConstantInt(Int32(5))]))
return entry
end
function validate_ir(job::CompilerJob{SPIRVCompilerTarget}, mod::LLVM.Module)
errors = IRError[]
# support for half and double depends on the target
if !job.config.target.supports_fp16
append!(errors, check_ir_values(mod, LLVM.HalfType()))
end
if !job.config.target.supports_fp64
append!(errors, check_ir_values(mod, LLVM.DoubleType()))
end
return errors
end
@unlocked function mcgen(job::CompilerJob{SPIRVCompilerTarget}, mod::LLVM.Module,
format=LLVM.API.LLVMAssemblyFile)
# The SPIRV Tools don't handle Julia's debug info, rejecting DW_LANG_Julia...
strip_debuginfo!(mod)
# SPIR-V does not support trap, and has no mechanism to abort compute kernels
# (OpKill is only available in fragment execution mode)
rm_trap!(mod)
# the LLVM to SPIR-V translator does not support the freeze instruction
# (SPIRV-LLVM-Translator#1140)
rm_freeze!(mod)
# translate to SPIR-V
input = tempname(cleanup=false) * ".bc"
translated = tempname(cleanup=false) * ".spv"
options = `--spirv-debug-info-version=ocl-100`
if !isempty(job.config.target.extensions)
str = join(map(ext->"+$ext", job.config.target.extensions), ",")
options = `$options --spirv-ext=$str`
end
write(input, mod)
let cmd = `$(SPIRV_LLVM_Translator_unified_jll.llvm_spirv()) $options -o $translated $input`
proc = run(ignorestatus(cmd))
if !success(proc)
error("""Failed to translate LLVM code to SPIR-V.
If you think this is a bug, please file an issue and attach $(input).""")
end
end
# validate
# XXX: parameterize this on the `validate` driver argument
# XXX: our code currently doesn't pass the validator
#if Base.JLOptions().debug_level >= 2
# cmd = `$(SPIRV_Tools_jll.spirv_val()) $translated`
# proc = run(ignorestatus(cmd))
# if !success(proc)
# error("""Failed to validate generated SPIR-V.
# If you think this is a bug, please file an issue and attach $(input) and $(translated).""")
# end
#end
# optimize
# XXX: parameterize this on the `optimize` driver argument
# XXX: the optimizer segfaults on some of our code
optimized = tempname(cleanup=false) * ".spv"
#let cmd = `$(SPIRV_Tools_jll.spirv_opt()) -O --skip-validation $translated -o $optimized`
# proc = run(ignorestatus(cmd))
# if !success(proc)
# error("""Failed to optimize generated SPIR-V.
# If you think this is a bug, please file an issue and attach $(input) and $(translated).""")
# end
#end
output = if format == LLVM.API.LLVMObjectFile
read(translated)
else
# disassemble
read(`$(SPIRV_Tools_jll.spirv_dis()) $translated`, String)
end
rm(input)
rm(translated)
#rm(optimized)
return output
end
# reimplementation that uses `spirv-dis`, giving much more pleasant output
function code_native(io::IO, job::CompilerJob{SPIRVCompilerTarget}; raw::Bool=false, dump_module::Bool=false)
obj, _ = JuliaContext() do ctx
compile(:obj, job; strip=!raw, only_entry=!dump_module, validate=false)
end
mktemp() do input_path, input_io
write(input_io, obj)
flush(input_io)
disassembler = SPIRV_Tools_jll.spirv_dis()
if io == stdout
run(`$disassembler $input_path`)
else
mktemp() do output_path, output_io
run(`$disassembler $input_path -o $output_path`)
asm = read(output_io, String)
print(io, asm)
end
end
end
end
## LLVM passes
# remove llvm.trap and its uses from a module
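# e.g. (illustrative): a `call void @llvm.trap()` instruction is erased, after
# which the then-unused declaration of `@llvm.trap` itself is removed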
function rm_trap!(mod::LLVM.Module)
job = current_job::CompilerJob
changed = false
@timeit_debug to "remove trap" begin
if haskey(functions(mod), "llvm.trap")
trap = functions(mod)["llvm.trap"]
for use in uses(trap)
val = user(use)
if isa(val, LLVM.CallInst)
erase!(val)
changed = true
end
end
@compiler_assert isempty(uses(trap)) job
erase!(trap)
end
end
return changed
end
# remove freeze and replace uses by the original value
# (KhronosGroup/SPIRV-LLVM-Translator#1140)
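# e.g. (illustrative): `%frozen = freeze i64 %x` is erased, with all uses of
# `%frozen` rewired to `%x` directly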
function rm_freeze!(mod::LLVM.Module)
job = current_job::CompilerJob
changed = false
@timeit_debug to "remove freeze" begin
for f in functions(mod), bb in blocks(f), inst in instructions(bb)
if inst isa LLVM.FreezeInst
orig = first(operands(inst))
replace_uses!(inst, orig)
@compiler_assert isempty(uses(inst)) job
erase!(inst)
changed = true
end
end
end
return changed
end
# wrap byval pointers in a single-value struct
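# e.g. (illustrative): an `i64* byval(i64) %x` parameter becomes
# `{ i64 }* byval({ i64 }) %x`, with a `struct_gep` in the entry block
# recovering the original `i64*` for the cloned function body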
function wrap_byval(@nospecialize(job::CompilerJob), mod::LLVM.Module, f::LLVM.Function)
ft = function_type(f)::LLVM.FunctionType
args = classify_arguments(job, ft)
filter!(args) do arg
arg.cc != GHOST
end
# find the byval parameters
byval = BitVector(undef, length(parameters(ft)))
if LLVM.version() >= v"12"
for i in 1:length(byval)
attrs = collect(parameter_attributes(f, i))
byval[i] = any(attrs) do attr
kind(attr) == kind(TypeAttribute("byval", LLVM.VoidType()))
end
end
else
# XXX: byval is not round-trippable on LLVM < 12 (see maleadt/LLVM.jl#186)
for arg in args
byval[arg.idx] = (arg.cc == BITS_REF)
end
end
# generate the wrapper function type & definition
new_types = LLVM.LLVMType[]
for (i, param) in enumerate(parameters(ft))
typ = if byval[i]
llvm_typ = convert(LLVMType, args[i].typ)
st = LLVM.StructType([llvm_typ])
LLVM.PointerType(st, addrspace(param))
else
param
end
push!(new_types, typ)
end
new_ft = LLVM.FunctionType(return_type(ft), new_types)
new_f = LLVM.Function(mod, "", new_ft)
linkage!(new_f, linkage(f))
for (arg, new_arg) in zip(parameters(f), parameters(new_f))
LLVM.name!(new_arg, LLVM.name(arg))
end
# emit IR performing the "conversions"
new_args = Vector{LLVM.Value}()
@dispose builder=IRBuilder() begin
entry = BasicBlock(new_f, "conversion")
position!(builder, entry)
# perform argument conversions
for (i, param) in enumerate(parameters(new_f))
if byval[i]
llvm_typ = convert(LLVMType, args[i].typ)
ptr = struct_gep!(builder, LLVM.StructType([llvm_typ]), param, 0)
push!(new_args, ptr)
else
push!(new_args, param)
end
end
# map the arguments
value_map = Dict{LLVM.Value, LLVM.Value}(
param => new_args[i] for (i,param) in enumerate(parameters(f))
)
value_map[f] = new_f
clone_into!(new_f, f; value_map,
changes=LLVM.API.LLVMCloneFunctionChangeTypeGlobalChanges)
# apply byval attributes again (`clone_into!` didn't due to the type mismatch)
for i in 1:length(byval)
attrs = parameter_attributes(new_f, i)
if byval[i]
llvm_typ = convert(LLVMType, args[i].typ)
push!(attrs, TypeAttribute("byval", LLVM.StructType([llvm_typ])))
end
end
# fall through
br!(builder, collect(blocks(new_f))[2])
end
# remove the old function
# NOTE: if we ever have legitimate uses of the old function, create a shim instead
fn = LLVM.name(f)
@assert isempty(uses(f))
replace_metadata_uses!(f, new_f)
erase!(f)
LLVM.name!(new_f, fn)
return new_f
end
defs(mod::LLVM.Module) = filter(f -> !isdeclaration(f), collect(functions(mod)))
decls(mod::LLVM.Module) = filter(f -> isdeclaration(f) && !LLVM.isintrinsic(f),
collect(functions(mod)))
## timings
const to = TimerOutput()
timings() = (TimerOutputs.print_timer(to); println())
enable_timings() = (TimerOutputs.enable_debug_timings(GPUCompiler); return)
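# example usage (illustrative): enable the debug timings before compiling,
# then print the accumulated table afterwards:
#
#   GPUCompiler.enable_timings()
#   # ... compile some kernels ...
#   GPUCompiler.timings()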
## debug verification
should_verify() = ccall(:jl_is_debugbuild, Cint, ()) == 1 ||
Base.JLOptions().debug_level >= 2 ||
parse(Bool, get(ENV, "CI", "false"))
isdebug(group, mod=GPUCompiler) =
Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, group, mod) !== nothing
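# e.g. (illustrative): `isdebug(:optim)`, as used to enable the
# WarnMissedTransformationsPass, returns true when debug logging is active for
# that group, such as after setting `ENV["JULIA_DEBUG"] = "GPUCompiler"`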
## lazy module loading
using UUIDs
struct LazyModule
pkg::Base.PkgId
LazyModule(name, uuid) = new(Base.PkgId(uuid, name))
end
function Base.getproperty(lazy_mod::LazyModule, sym::Symbol)
pkg = getfield(lazy_mod, :pkg)
mod = get(Base.loaded_modules, pkg, nothing)
if mod === nothing
error("This functionality requires the $(pkg.name) package, which should be installed and loaded first.")
end
getfield(mod, sym)
end
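# example usage (mirroring the SPIR-V JLL handling elsewhere in this package):
# declare the module lazily and only touch it when needed; accessing it errors
# until the package has actually been loaded.
#
#   const SPIRV_Tools_jll = LazyModule("SPIRV_Tools_jll",
#                                      UUID("6ac6d60f-d740-5983-97d7-a4482c0689f4"))
#   SPIRV_Tools_jll.spirv_dis()  # only works after `using SPIRV_Tools_jll`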
## safe logging
using Logging
const STDERR_HAS_COLOR = Ref{Bool}(false)
# Prevent invalidation when packages define custom loggers
# Using invoke in combination with @nospecialize eliminates backedges to these methods
function _invoked_min_enabled_level(@nospecialize(logger))
return invoke(Logging.min_enabled_level, Tuple{typeof(logger)}, logger)::LogLevel
end
# define safe loggers for use in generated functions (where task switches are not allowed)
for level in [:debug, :info, :warn, :error]
@eval begin
macro $(Symbol("safe_$level"))(ex...)
macrocall = :(@placeholder $(ex...))
# NOTE: `@placeholder` in order to avoid hard-coding @__LINE__ etc
macrocall.args[1] = Symbol($"@$level")
quote
io = IOContext(Core.stderr, :color=>STDERR_HAS_COLOR[])
# ideally we call Logging.shouldlog() here, but that is likely to yield,
# so instead we rely on the min_enabled_level of the logger.
# in the case of custom loggers that may be an issue, because
# they may expect Logging.shouldlog() to be called, so we use
# the global_logger()'s min level, which is more likely to be usable.
min_level = _invoked_min_enabled_level(global_logger())
with_logger(Logging.ConsoleLogger(io, min_level)) do
$(esc(macrocall))
end
end
end
end
end
macro safe_show(exs...)
blk = Expr(:block)
for ex in exs
push!(blk.args,
:(println(Core.stdout, $(sprint(Base.show_unquoted,ex)*" = "),
repr(begin local value = $(esc(ex)) end))))
end
isempty(exs) || push!(blk.args, :value)
return blk
end
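# example (illustrative): `@safe_show ptr len` prints `ptr = ...` and
# `len = ...` to Core.stdout, which is safe to use from generated functions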
## codegen locking
# lock codegen to prevent races on the LLVM context.
#
# XXX: it's not allowed to switch tasks while under this lock, can we guarantee that?
# it's probably easier to start using our own LLVM context when that's possible.
macro locked(ex)
if VERSION >= v"1.12.0-DEV.769"
# no need to handle locking; it's taken care of by the engine
# as long as we use a correct cache owner token.
return esc(ex)
end
def = splitdef(ex)
def[:body] = quote
ccall(:jl_typeinf_lock_begin, Cvoid, ())
try
$(def[:body])
finally
ccall(:jl_typeinf_lock_end, Cvoid, ())
end
end
esc(combinedef(def))
end
# HACK: temporarily unlock again to perform a task switch
macro unlocked(ex)
if VERSION >= v"1.12.0-DEV.769"
return esc(ex)
end
def = splitdef(ex)
def[:body] = quote
ccall(:jl_typeinf_lock_end, Cvoid, ())
try
$(def[:body])
finally
ccall(:jl_typeinf_lock_begin, Cvoid, ())
end
end
esc(combinedef(def))
end
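# example usage (sketch): codegen entry points are annotated with `@locked`,
# while functions that may block on external processes (like `mcgen` shelling
# out to the SPIR-V translator) are marked `@unlocked` to permit task switches:
#
#   @unlocked function mcgen(job::CompilerJob, mod::LLVM.Module, format)
#       # ... may invoke external tools ...
#   end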
# validation of properties and code
export InvalidIRError
# TODO: upstream
function method_matches(@nospecialize(tt::Type{<:Tuple}); world::Integer)
methods = Core.MethodMatch[]
matches = Base._methods_by_ftype(tt, -1, world)
matches === nothing && return methods
for match in matches::Vector
push!(methods, match::Core.MethodMatch)
end
return methods
end
function typeinf_type(mi::MethodInstance; interp::CC.AbstractInterpreter)
ty = Core.Compiler.typeinf_type(interp, mi.def, mi.specTypes, mi.sparam_vals)
return something(ty, Any)
end
function check_method(@nospecialize(job::CompilerJob))
ft = job.source.specTypes.parameters[1]
ft <: Core.Builtin && error("$(unsafe_function_from_type(ft)) is not a generic function")
for sparam in job.source.sparam_vals
if sparam isa TypeVar
throw(KernelError(job, "method captures typevar '$sparam' (you probably use an unbound type variable)"))
end
end
# kernels can't return values
if job.config.kernel
rt = typeinf_type(job.source; interp=get_interpreter(job))
if rt != Nothing && rt != Union{}
throw(KernelError(job, "kernel returns a value of type `$rt`",
"""Make sure your kernel function ends in `return`, `return nothing` or `nothing`."""))
end
end
return
end
# The actual check is rather complicated
# and might change from version to version...
function hasfieldcount(@nospecialize(dt))
try
fieldcount(dt)
catch
return false
end
return true
end
function explain_nonisbits(@nospecialize(dt), depth=1; maxdepth=10)
dt===Module && return "" # work around JuliaLang/julia#33347
depth > maxdepth && return ""
hasfieldcount(dt) || return ""
msg = ""
for (ft, fn) in zip(fieldtypes(dt), fieldnames(dt))
if !isbitstype(ft)
msg *= " "^depth * ".$fn is of type $ft which is not isbits.\n"
msg *= explain_nonisbits(ft, depth+1)
end
end
return msg
end
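# e.g. (illustrative): for a wrapper `struct CleverType; x::BigInt; end` this
# yields " .x is of type BigInt which is not isbits.\n", indenting one level
# deeper for every nested non-isbits field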
function check_invocation(@nospecialize(job::CompilerJob))
sig = job.source.specTypes
ft = sig.parameters[1]
tt = Tuple{sig.parameters[2:end]...}
Base.isdispatchtuple(tt) || error("$tt is not a dispatch tuple")
# make sure any non-isbits arguments are unused
real_arg_i = 0
for (arg_i,dt) in enumerate(sig.parameters)
isghosttype(dt) && continue
Core.Compiler.isconstType(dt) && continue
real_arg_i += 1
# XXX: can we support these for CPU targets?
if dt <: Core.OpaqueClosure
throw(KernelError(job, "passing an opaque closure",
"""Argument $arg_i to your kernel function is an opaque closure.
This is a CPU-only object not supported by GPUCompiler."""))
end
if !isbitstype(dt)
throw(KernelError(job, "passing and using non-bitstype argument",
"""Argument $arg_i to your kernel function is of type $dt, which is not isbits:
$(explain_nonisbits(dt))"""))
end
end
return
end
## IR validation
const IRError = Tuple{String, StackTraces.StackTrace, Any} # kind, bt, meta
struct InvalidIRError <: Exception
job::CompilerJob
errors::Vector{IRError}
end
const RUNTIME_FUNCTION = "call to the Julia runtime"
const UNKNOWN_FUNCTION = "call to an unknown function"
const POINTER_FUNCTION = "call through a literal pointer"
const CCALL_FUNCTION = "call to an external C function"
const LAZY_FUNCTION = "call to a lazy-initialized function"
const DELAYED_BINDING = "use of an undefined name"
const DYNAMIC_CALL = "dynamic function invocation"
function Base.showerror(io::IO, err::InvalidIRError)
print(io, "InvalidIRError: compiling ", err.job.source, " resulted in invalid LLVM IR")
for (kind, bt, meta) in err.errors
printstyled(io, "\nReason: unsupported $kind"; color=:red)
if meta !== nothing
if kind == RUNTIME_FUNCTION || kind == UNKNOWN_FUNCTION || kind == POINTER_FUNCTION || kind == DYNAMIC_CALL || kind == CCALL_FUNCTION || kind == LAZY_FUNCTION
printstyled(io, " (call to ", meta, ")"; color=:red)
elseif kind == DELAYED_BINDING
printstyled(io, " (use of '", meta, "')"; color=:red)
end
end
Base.show_backtrace(io, bt)
end
println(io)
printstyled(io, "Hint"; bold = true, color = :cyan)
printstyled(
io,
": catch this exception as `err` and call `code_typed(err; interactive = true)` to",
" introspect the erronous code with Cthulhu.jl";
color = :cyan,
)
return
end
function check_ir(job, args...)
errors = check_ir!(job, IRError[], args...)
unique!(errors)
if !isempty(errors)
throw(InvalidIRError(job, errors))
end
return
end
function check_ir!(job, errors::Vector{IRError}, mod::LLVM.Module)
for f in functions(mod)
check_ir!(job, errors, f)
end
# custom validation
append!(errors, validate_ir(job, mod))
return errors
end
function check_ir!(job, errors::Vector{IRError}, f::LLVM.Function)
for bb in blocks(f), inst in instructions(bb)
if isa(inst, LLVM.CallInst)
check_ir!(job, errors, inst)
elseif isa(inst, LLVM.LoadInst)
check_ir!(job, errors, inst)
end
end
return errors
end
const libjulia = Ref{Ptr{Cvoid}}(C_NULL)
function check_ir!(job, errors::Vector{IRError}, inst::LLVM.LoadInst)
bt = backtrace(inst)
src = operands(inst)[1]
if src isa ConstantExpr
if opcode(src) == LLVM.API.LLVMBitCast
src = operands(src)[1]
end
end
if src isa GlobalVariable
name = LLVM.name(src)
if startswith(name, "jlplt_")
try
rx = r"jlplt_(.*)_\d+_got"
name = match(rx, name).captures[1]
push!(errors, (LAZY_FUNCTION, bt, name))
catch e
@safe_debug "Decoding name of PLT entry failed" inst bb=LLVM.parent(inst)
push!(errors, (LAZY_FUNCTION, bt, nothing))
end
end
end
return errors
end
function check_ir!(job, errors::Vector{IRError}, inst::LLVM.CallInst)
bt = backtrace(inst)
dest = called_operand(inst)
if isa(dest, LLVM.Function)
fn = LLVM.name(dest)
# some special handling for runtime functions that we don't implement
if fn == "jl_get_binding_or_error" || fn == "ijl_get_binding_or_error"
try
m, sym = arguments(inst)
sym = first(operands(sym::ConstantExpr))::ConstantInt
sym = convert(Int, sym)
sym = Ptr{Cvoid}(sym)
sym = Base.unsafe_pointer_to_objref(sym)
push!(errors, (DELAYED_BINDING, bt, sym))
catch e
@safe_debug "Decoding arguments to jl_get_binding_or_error failed" inst bb=LLVM.parent(inst)
push!(errors, (DELAYED_BINDING, bt, nothing))
end
elseif fn == "jl_reresolve_binding_value_seqcst" || fn == "ijl_reresolve_binding_value_seqcst"
try
# pry the binding from the IR
expr = arguments(inst)[1]::ConstantExpr
expr = first(operands(expr))::ConstantInt # get rid of inttoptr
ptr = Ptr{Any}(convert(Int, expr))
obj = Base.unsafe_pointer_to_objref(ptr)
push!(errors, (DELAYED_BINDING, bt, obj.globalref))
catch e
@safe_debug "Decoding arguments to jl_reresolve_binding_value_seqcst failed" inst bb=LLVM.parent(inst)
push!(errors, (DELAYED_BINDING, bt, nothing))
end
elseif fn == "jl_invoke" || fn == "ijl_invoke"
try
f, args, nargs, meth = arguments(inst)
meth = first(operands(meth::ConstantExpr))::ConstantInt
meth = convert(Int, meth)
meth = Ptr{Cvoid}(meth)
meth = Base.unsafe_pointer_to_objref(meth)::Core.MethodInstance
push!(errors, (DYNAMIC_CALL, bt, meth.def))
catch e
@safe_debug "Decoding arguments to jl_invoke failed" inst bb=LLVM.parent(inst)
push!(errors, (DYNAMIC_CALL, bt, nothing))
end
elseif fn == "jl_apply_generic" || fn == "ijl_apply_generic"
try
f, args, nargs = arguments(inst)
f = first(operands(f))::ConstantInt # get rid of inttoptr
f = convert(Int, f)
f = Ptr{Cvoid}(f)
f = Base.unsafe_pointer_to_objref(f)
push!(errors, (DYNAMIC_CALL, bt, f))
catch e
@safe_debug "Decoding arguments to jl_apply_generic failed" inst bb=LLVM.parent(inst)
push!(errors, (DYNAMIC_CALL, bt, nothing))
end
elseif fn == "jl_load_and_lookup" || fn == "ijl_load_and_lookup"
try
f_lib, f_name, hnd = arguments(inst)
f_name = first(operands(f_name))::GlobalVariable # get rid of the GEP
name_init = LLVM.initializer(f_name)::ConstantDataSequential
name_value = map(collect(name_init)) do char
convert(UInt8, char)
end |> String
name_value = name_value[1:end-1] # remove trailing \0
push!(errors, (CCALL_FUNCTION, bt, name_value))
catch e
@safe_debug "Decoding arguments to jl_load_and_lookup failed" inst bb=LLVM.parent(inst)
push!(errors, (CCALL_FUNCTION, bt, nothing))
end
# detect calls to undefined functions
elseif isdeclaration(dest) && !LLVM.isintrinsic(dest) && !isintrinsic(job, fn)
# figure out if the function lives in the Julia runtime library
if libjulia[] == C_NULL
paths = filter(Libdl.dllist()) do path
name = splitdir(path)[2]
startswith(name, "libjulia")
end
libjulia[] = Libdl.dlopen(first(paths))
end
if Libdl.dlsym_e(libjulia[], fn) != C_NULL
push!(errors, (RUNTIME_FUNCTION, bt, LLVM.name(dest)))
else
push!(errors, (UNKNOWN_FUNCTION, bt, LLVM.name(dest)))
end
end
elseif isa(dest, InlineAsm)
# let's assume it's valid ASM
elseif isa(dest, ConstantExpr)
# detect calls to literal pointers
if opcode(dest) == LLVM.API.LLVMIntToPtr
# extract the literal pointer
ptr_arg = first(operands(dest))
@compiler_assert isa(ptr_arg, ConstantInt) job
ptr_val = convert(Int, ptr_arg)
ptr = Ptr{Cvoid}(ptr_val)
if !valid_function_pointer(job, ptr)
# look it up in the Julia JIT cache
frames = ccall(:jl_lookup_code_address, Any, (Ptr{Cvoid}, Cint,), ptr, 0)
# XXX: what if multiple frames are returned? rare, but happens
if length(frames) == 1
fn, file, line, linfo, fromC, inlined = last(frames)
push!(errors, (POINTER_FUNCTION, bt, fn))
else
push!(errors, (POINTER_FUNCTION, bt, nothing))
end
end
end
end
return errors
end
# helper function to check for illegal values in an LLVM module
function check_ir_values(mod::LLVM.Module, predicate, msg="value")
errors = IRError[]
for fun in functions(mod), bb in blocks(fun), inst in instructions(bb)
if predicate(inst) || any(predicate, operands(inst))
bt = backtrace(inst)
push!(errors, (msg, bt, inst))
end
end
return errors
end
## shorthand to check for illegal value types
function check_ir_values(mod::LLVM.Module, T_bad::LLVMType)
check_ir_values(mod, val -> value_type(val) == T_bad, "use of $(string(T_bad)) value")
end
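# example usage (as in the SPIR-V target's validate_ir): collect all uses of
# double-precision values on targets without FP64 support:
#
#   errors = check_ir_values(mod, LLVM.DoubleType())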
@testitem "Aqua" begin
using Aqua
Aqua.test_all(GPUCompiler)
end
@testitem "BPF" setup=[BPF, Helpers] begin
############################################################################################
@testset "No-op" begin
kernel() = 0
output = sprint(io->BPF.code_native(io, kernel, ()))
@test occursin("\tr0 = 0\n\texit", output)
end
@testset "Return argument" begin
kernel(x) = x
output = sprint(io->BPF.code_native(io, kernel, (UInt64,)))
@test occursin("\tr0 = r1\n\texit", output)
end
@testset "Addition" begin
kernel(x) = x+1
output = sprint(io->BPF.code_native(io, kernel, (UInt64,)))
@test occursin("\tr0 = r1\n\tr0 += 1\n\texit", output)
end
@testset "Errors" begin
kernel(x) = fakefunc(x)
@test_throws GPUCompiler.InvalidIRError BPF.code_execution(kernel, (UInt64,))
end
@testset "Function Pointers" begin
@testset "valid" begin
goodcall(x) = Base.llvmcall("%2 = call i64 inttoptr (i64 3 to i64 (i64)*)(i64 %0)\nret i64 %2", Int, Tuple{Int}, x)
kernel(x) = goodcall(x)
output = sprint(io->BPF.code_native(io, kernel, (Int,)))
@test occursin(r"\tcall .*\n\texit", output)
end
@testset "invalid" begin
badcall(x) = Base.llvmcall("%2 = call i64 inttoptr (i64 3000 to i64 (i64)*)(i64 %0)\nret i64 %2", Int, Tuple{Int}, x)
kernel(x) = badcall(x)
@test_throws GPUCompiler.InvalidIRError BPF.code_execution(kernel, (Int,))
end
end
end
@testsetup module BPF
using GPUCompiler
# create a native test compiler, and generate reflection methods for it
include("runtime.jl")
struct CompilerParams <: AbstractCompilerParams end
GPUCompiler.runtime_module(::CompilerJob{<:Any,CompilerParams}) = TestRuntime
function create_job(@nospecialize(func), @nospecialize(types);
kernel::Bool=false, always_inline=false, kwargs...)
source = methodinstance(typeof(func), Base.to_tuple_type(types), Base.get_world_counter())
target = BPFCompilerTarget()
params = CompilerParams()
config = CompilerConfig(target, params; kernel, always_inline)
CompilerJob(source, config), kwargs
end
function code_llvm(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_llvm(io, job; kwargs...)
end
function code_native(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_native(io, job; kwargs...)
end
# simulates codegen for a kernel function: validates by default
function code_execution(@nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
JuliaContext() do ctx
GPUCompiler.compile(:asm, job; kwargs...)
end
end
end
@testitem "examples" begin
function find_sources(path::String, sources=String[])
if isdir(path)
for entry in readdir(path)
find_sources(joinpath(path, entry), sources)
end
elseif endswith(path, ".jl")
push!(sources, path)
end
sources
end
dir = joinpath(@__DIR__, "..", "examples")
files = find_sources(dir)
filter!(file -> readline(file) != "# EXCLUDE FROM TESTING", files)
filter!(file -> !occursin("Kaleidoscope", file), files)
cd(dir) do
examples = relpath.(files, Ref(dir))
@testset for example in examples
cmd = `$(Base.julia_cmd()) --project=$(Base.active_project())`
@test success(pipeline(`$cmd $example`, stderr=stderr))
end
end
end
@testitem "GCN" setup=[GCN, Helpers] begin
@inline sink_gcn(i) = sink(i, Val(5))
@test GCNCompilerTarget(dev_isa="gfx900") == GCNCompilerTarget("gfx900")
############################################################################################
@testset "IR" begin
@testset "kernel calling convention" begin
kernel() = return
ir = sprint(io->GCN.code_llvm(io, kernel, Tuple{}; dump_module=true))
@test !occursin("amdgpu_kernel", ir)
ir = sprint(io->GCN.code_llvm(io, kernel, Tuple{};
dump_module=true, kernel=true))
@test occursin("amdgpu_kernel", ir)
end
end
############################################################################################
@testset "assembly" begin
@testset "skip scalar trap" begin
workitem_idx_x() = ccall("llvm.amdgcn.workitem.id.x", llvmcall, Int32, ())
trap() = ccall("llvm.trap", llvmcall, Nothing, ())
function kernel()
if workitem_idx_x() > 1
trap()
end
return
end
asm = sprint(io->GCN.code_native(io, kernel, Tuple{}))
@test occursin("s_trap 2", asm)
@test_skip occursin("s_cbranch_execz", asm)
if Base.libllvm_version < v"9"
@test_broken occursin("v_readfirstlane", asm)
end
end
@testset "child functions" begin
# we often test using @noinline child functions, so test whether these survive
# (despite not having side-effects)
@noinline child(i) = sink_gcn(i)
function parent(i)
child(i)
return
end
asm = sprint(io->GCN.code_native(io, parent, Tuple{Int64}; dump_module=true))
@test occursin(r"s_add_u32.*julia_child_.*@rel32@", asm)
@test occursin(r"s_addc_u32.*julia_child_.*@rel32@", asm)
end
@testset "kernel functions" begin
@noinline nonentry(i) = sink_gcn(i)
function entry(i)
nonentry(i)
return
end
asm = sprint(io->GCN.code_native(io, entry, Tuple{Int64}; dump_module=true, kernel=true))
@test occursin(r"\.amdhsa_kernel \w*entry", asm)
@test !occursin(r"\.amdhsa_kernel \w*nonentry", asm)
@test occursin(r"\.type.*\w*nonentry\w*,@function", asm)
end
@testset "child function reuse" begin
# bug: depending on a child function from multiple parents resulted in
# the child only being present once
mod = @eval module $(gensym())
export child, parent1, parent2
@noinline child(i) = sink_gcn(i)
function parent1(i)
child(i)
return
end
function parent2(i)
child(i+1)
return
end
end
asm = sprint(io->GCN.code_native(io, mod.parent1, Tuple{Int}; dump_module=true))
@test occursin(r"\.type.*julia_[[:alnum:]_.]*child_\d*,@function", asm)
asm = sprint(io->GCN.code_native(io, mod.parent2, Tuple{Int}; dump_module=true))
@test occursin(r"\.type.*julia_[[:alnum:]_.]*child_\d*,@function", asm)
end
@testset "child function reuse bis" begin
# bug: similar, but slightly different issue as above
# in the case of two child functions
mod = @eval module $(gensym())
export parent1, parent2, child1, child2
@noinline child1(i) = sink_gcn(i)
@noinline child2(i) = sink_gcn(i+1)
function parent1(i)
child1(i) + child2(i)
return
end
function parent2(i)
child1(i+1) + child2(i+1)
return
end
end
asm = sprint(io->GCN.code_native(io, mod.parent1, Tuple{Int}; dump_module=true))
@test occursin(r"\.type.*julia_[[:alnum:]_.]*child1_\d*,@function", asm)
@test occursin(r"\.type.*julia_[[:alnum:]_.]*child2_\d*,@function", asm)
asm = sprint(io->GCN.code_native(io, mod.parent2, Tuple{Int}; dump_module=true))
@test occursin(r"\.type.*julia_[[:alnum:]_.]*child1_\d*,@function", asm)
@test occursin(r"\.type.*julia_[[:alnum:]_.]*child2_\d*,@function", asm)
end
@testset "indirect sysimg function use" begin
# issue #9: re-using sysimg functions should force recompilation
# (host fldmod1->mod1 throws, so the GCN code shouldn't contain a throw)
# NOTE: Int32 to test for #49
function kernel(out)
wid, lane = fldmod1(unsafe_load(out), Int32(32))
unsafe_store!(out, wid)
return
end
asm = sprint(io->GCN.code_native(io, kernel, Tuple{Ptr{Int32}}))
@test !occursin("jl_throw", asm)
@test !occursin("jl_invoke", asm) # forced recompilation should still not invoke
end
@testset "LLVM intrinsics" begin
# issue #13 (a): cannot select trunc
function kernel(x)
unsafe_trunc(Int, x)
return
end
GCN.code_native(devnull, kernel, Tuple{Float64})
@test "We did not crash!" != ""
end
# FIXME: _ZNK4llvm14TargetLowering20scalarizeVectorStoreEPNS_11StoreSDNodeERNS_12SelectionDAGE
false && @testset "exception arguments" begin
function kernel(a)
unsafe_store!(a, trunc(Int, unsafe_load(a)))
return
end
GCN.code_native(devnull, kernel, Tuple{Ptr{Float64}})
end
# FIXME: in function julia_inner_18528 void (%jl_value_t addrspace(10)*): invalid addrspacecast
false && @testset "GC and TLS lowering" begin
mod = @eval module $(gensym())
mutable struct PleaseAllocate
y::Csize_t
end
# common pattern in Julia 0.7: outlined throw to avoid a GC frame in the calling code
@noinline function inner(x)
sink(x.y)
nothing
end
function kernel(i)
inner(PleaseAllocate(Csize_t(42)))
nothing
end
end
asm = sprint(io->GCN.code_native(io, mod.kernel, Tuple{Int}))
@test occursin("gpu_gc_pool_alloc", asm)
@test !occursin("julia.push_gc_frame", asm)
@test !occursin("julia.pop_gc_frame", asm)
@test !occursin("julia.get_gc_frame_slot", asm)
@test !occursin("julia.new_gc_frame", asm)
# make sure that we can still elide allocations
function ref_kernel(ptr, i)
data = Ref{Int64}()
data[] = 0
if i > 1
data[] = 1
else
data[] = 2
end
unsafe_store!(ptr, data[], i)
return nothing
end
asm = sprint(io->GCN.code_native(io, ref_kernel, Tuple{Ptr{Int64}, Int}))
@test !occursin("gpu_gc_pool_alloc", asm)
end
@testset "float boxes" begin
function kernel(a,b)
c = Int32(a)
# the conversion to Int32 may fail, in which case the input Float32 is boxed in order to
# pass it to the @nospecialize exception constructor. We should really avoid that (e.g.
# by avoiding @nospecialize, or by optimizing the unused arguments away), but for now the
# box should just work.
unsafe_store!(b, c)
return
end
ir = sprint(io->GCN.code_llvm(io, kernel, Tuple{Float32,Ptr{Float32}}))
@test occursin("jl_box_float32", ir)
GCN.code_native(devnull, kernel, Tuple{Float32,Ptr{Float32}})
end
end
############################################################################################
end
@testsetup module GCN
using GPUCompiler
# create a GCN-based test compiler, and generate reflection methods for it
include("runtime.jl")
struct CompilerParams <: AbstractCompilerParams end
GPUCompiler.runtime_module(::CompilerJob{<:Any,CompilerParams}) = TestRuntime
function create_job(@nospecialize(func), @nospecialize(types);
kernel::Bool=false, always_inline=false, kwargs...)
source = methodinstance(typeof(func), Base.to_tuple_type(types), Base.get_world_counter())
target = GCNCompilerTarget(dev_isa="gfx900")
params = CompilerParams()
config = CompilerConfig(target, params; kernel, always_inline)
CompilerJob(source, config), kwargs
end
function code_typed(@nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_typed(job; kwargs...)
end
function code_warntype(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_warntype(io, job; kwargs...)
end
function code_llvm(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_llvm(io, job; kwargs...)
end
function code_native(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_native(io, job; kwargs...)
end
# simulates codegen for a kernel function: validates by default
function code_execution(@nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kernel=true, kwargs...)
JuliaContext() do ctx
GPUCompiler.compile(:asm, job; kwargs...)
end
end
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 1670 | @testsetup module Helpers
using Test
export @test_throws_message, sink
# @test_throw, with additional testing for the exception message
macro test_throws_message(f, typ, ex...)
quote
msg = ""
@test_throws $(esc(typ)) try
$(esc(ex...))
catch err
msg = sprint(showerror, err)
rethrow()
end
if !$(esc(f))(msg)
# @test should return its result, but doesn't
errmsg = "Failed to validate error message\n" * msg
@error errmsg
end
@test $(esc(f))(msg)
end
end
# helper function for sinking a value to prevent the callee from getting optimized away
@inline @generated function sink(i::T, ::Val{addrspace}=Val(0)) where {T <: Union{Int32,UInt32}, addrspace}
as_str = addrspace > 0 ? " addrspace($addrspace)" : ""
llvmcall_str = """%slot = alloca i32$(addrspace > 0 ? ", addrspace($addrspace)" : "")
store volatile i32 %0, i32$(as_str)* %slot
%value = load volatile i32, i32$(as_str)* %slot
ret i32 %value"""
return :(Base.llvmcall($llvmcall_str, T, Tuple{T}, i))
end
@inline @generated function sink(i::T, ::Val{addrspace}=Val(0)) where {T <: Union{Int64,UInt64}, addrspace}
as_str = addrspace > 0 ? " addrspace($addrspace)" : ""
llvmcall_str = """%slot = alloca i64$(addrspace > 0 ? ", addrspace($addrspace)" : "")
store volatile i64 %0, i64$(as_str)* %slot
%value = load volatile i64, i64$(as_str)* %slot
ret i64 %value"""
return :(Base.llvmcall($llvmcall_str, T, Tuple{T}, i))
end
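# example (illustrative): `sink(Int32(1))` returns its argument through a
# volatile stack slot, preventing the optimizer from removing the value (and
# whatever computed it) as dead code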
end
@testitem "Metal" setup=[Metal, Helpers] begin
using LLVM
############################################################################################
@testset "IR" begin
@testset "kernel functions" begin
@testset "byref aggregates" begin
kernel(x) = return
ir = sprint(io->Metal.code_llvm(io, kernel, Tuple{Tuple{Int}}))
@test occursin(r"@\w*kernel\w*\(({ i64 }|\[1 x i64\])\*", ir) ||
occursin(r"@\w*kernel\w*\(ptr", ir)
# for kernels, every pointer argument needs to take an address space
ir = sprint(io->Metal.code_llvm(io, kernel, Tuple{Tuple{Int}}; kernel=true))
@test occursin(r"@\w*kernel\w*\(({ i64 }|\[1 x i64\]) addrspace\(1\)\*", ir) ||
occursin(r"@\w*kernel\w*\(ptr addrspace\(1\)", ir)
end
@testset "byref primitives" begin
kernel(x) = return
ir = sprint(io->Metal.code_llvm(io, kernel, Tuple{Int}))
@test occursin(r"@\w*kernel\w*\(i64 ", ir)
# for kernels, every pointer argument needs to take an address space
ir = sprint(io->Metal.code_llvm(io, kernel, Tuple{Int}; kernel=true))
@test occursin(r"@\w*kernel\w*\(i64 addrspace\(1\)\*", ir) ||
occursin(r"@\w*kernel\w*\(ptr addrspace\(1\)", ir)
end
@testset "module metadata" begin
kernel() = return
ir = sprint(io->Metal.code_llvm(io, kernel, Tuple{};
dump_module=true, kernel=true))
@test occursin("air.version", ir)
@test occursin("air.language_version", ir)
@test occursin("air.max_device_buffers", ir)
end
@testset "argument metadata" begin
kernel(x) = return
ir = sprint(io->Metal.code_llvm(io, kernel, Tuple{Int};
dump_module=true, kernel=true))
@test occursin("air.buffer", ir)
# XXX: perform more exhaustive testing of argument passing metadata here,
# or just defer to execution testing in Metal.jl?
end
@testset "input arguments" begin
function kernel(ptr)
idx = ccall("extern julia.air.thread_position_in_threadgroup.i32", llvmcall, UInt32, ()) + 1
unsafe_store!(ptr, 42, idx)
return
end
ir = sprint(io->Metal.code_llvm(io, kernel, Tuple{Core.LLVMPtr{Int,1}}))
@test occursin(r"@\w*kernel\w*\(.* addrspace\(1\)\* %.+\)", ir) ||
occursin(r"@\w*kernel\w*\(ptr addrspace\(1\) %.+\)", ir)
@test occursin(r"call i32 @julia.air.thread_position_in_threadgroup.i32", ir)
ir = sprint(io->Metal.code_llvm(io, kernel, Tuple{Core.LLVMPtr{Int,1}}; kernel=true))
@test occursin(r"@\w*kernel\w*\(.* addrspace\(1\)\* %.+, i32 %thread_position_in_threadgroup\)", ir) ||
occursin(r"@\w*kernel\w*\(ptr addrspace\(1\) %.+, i32 %thread_position_in_threadgroup\)", ir)
@test !occursin(r"call i32 @julia.air.thread_position_in_threadgroup.i32", ir)
end
@testset "vector intrinsics" begin
foo(x, y) = ccall("llvm.smax.v2i64", llvmcall, NTuple{2, VecElement{Int64}},
(NTuple{2, VecElement{Int64}}, NTuple{2, VecElement{Int64}}), x, y)
ir = sprint(io->Metal.code_llvm(io, foo, (NTuple{2, VecElement{Int64}}, NTuple{2, VecElement{Int64}})))
@test occursin("air.max.s.v2i64", ir)
end
@testset "unsupported type detection" begin
function kernel1(ptr)
buf = reinterpret(Ptr{Float32}, ptr)
val = unsafe_load(buf)
dval = Cdouble(val)
# ccall("extern metal_os_log", llvmcall, Nothing, (Float64,), dval)
Base.llvmcall(("""
declare void @llvm.va_start(i8*)
declare void @llvm.va_end(i8*)
declare void @air.os_log(i8*, i64)
define void @metal_os_log(...) {
%1 = alloca i8*
%2 = bitcast i8** %1 to i8*
call void @llvm.va_start(i8* %2)
%3 = load i8*, i8** %1
call void @air.os_log(i8* %3, i64 8)
call void @llvm.va_end(i8* %2)
ret void
}
define void @entry(double %val) #0 {
call void (...) @metal_os_log(double %val)
ret void
}
attributes #0 = { alwaysinline }""", "entry"),
Nothing, Tuple{Float64}, dval)
return
end
ir = sprint(io->Metal.code_llvm(io, kernel1, Tuple{Core.LLVMPtr{Float32,1}}; validate=true))
@test occursin("@metal_os_log", ir)
function kernel2(ptr)
val = unsafe_load(ptr)
res = val * val
unsafe_store!(ptr, res)
return
end
@test_throws_message(InvalidIRError, Metal.code_llvm(devnull, kernel2, Tuple{Core.LLVMPtr{Float64,1}}; validate=true)) do msg
occursin("unsupported use of double value", msg)
end
end
end
end
end
@testsetup module Metal
using GPUCompiler
# create a Metal test compiler, and generate reflection methods for it
include("runtime.jl")
struct CompilerParams <: AbstractCompilerParams end
GPUCompiler.runtime_module(::CompilerJob{<:Any,CompilerParams}) = TestRuntime
function create_job(@nospecialize(func), @nospecialize(types);
kernel::Bool=false, always_inline=false, kwargs...)
source = methodinstance(typeof(func), Base.to_tuple_type(types), Base.get_world_counter())
target = MetalCompilerTarget(; macos=v"12.2", metal=v"3.0", air=v"3.0")
params = CompilerParams()
config = CompilerConfig(target, params; kernel, always_inline)
CompilerJob(source, config), kwargs
end
function code_typed(@nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_typed(job; kwargs...)
end
function code_warntype(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_warntype(io, job; kwargs...)
end
function code_llvm(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_llvm(io, job; kwargs...)
end
function code_native(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_native(io, job; kwargs...)
end
# simulates codegen for a kernel function: validates by default
function code_execution(@nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kernel=true, kwargs...)
JuliaContext() do ctx
GPUCompiler.compile(:asm, job; kwargs...)
end
end
end
@testitem "native" setup=[Native, Helpers] begin
using Test
############################################################################################
@testset "reflection" begin
job, _ = Native.create_job(identity, (Int,))
@test only(GPUCompiler.code_lowered(job)) isa Core.CodeInfo
ci, rt = only(GPUCompiler.code_typed(job))
@test rt === Int
ir = sprint(io->GPUCompiler.code_warntype(io, job))
@test contains(ir, "MethodInstance for identity")
ir = sprint(io->GPUCompiler.code_llvm(io, job))
@test contains(ir, "julia_identity")
asm = sprint(io->GPUCompiler.code_native(io, job))
@test contains(asm, "julia_identity")
end
@testset "compilation" begin
@testset "callable structs" begin
struct MyCallable end
(::MyCallable)(a, b) = a+b
(ci, rt) = Native.code_typed(MyCallable(), (Int, Int), kernel=false)[1]
@test ci.slottypes[1] == Core.Compiler.Const(MyCallable())
end
@testset "compilation database" begin
@noinline inner(x) = x+1
function outer(x)
return inner(x)
end
job, _ = Native.create_job(outer, (Int,))
JuliaContext() do ctx
ir, meta = GPUCompiler.compile(:llvm, job)
meth = only(methods(outer, (Int,)))
mis = filter(mi->mi.def == meth, keys(meta.compiled))
@test length(mis) == 1
other_mis = filter(mi->mi.def != meth, keys(meta.compiled))
@test length(other_mis) == 1
@test only(other_mis).def in methods(inner)
end
end
@testset "advanced database" begin
@noinline inner(x) = x+1
foo(x) = sum(inner, fill(x, 10, 10))
job, _ = Native.create_job(foo, (Float64,))
JuliaContext() do ctx
# shouldn't segfault
ir, meta = GPUCompiler.compile(:llvm, job; validate=false)
meth = only(methods(foo, (Float64,)))
mis = filter(mi->mi.def == meth, keys(meta.compiled))
@test length(mis) == 1
inner_methods = filter(keys(meta.compiled)) do mi
mi.def in methods(inner) && mi.specTypes == Tuple{typeof(inner), Float64}
end
@test length(inner_methods) == 1
end
end
@testset "cached compilation" begin
@gensym child kernel unrelated
@eval @noinline $child(i) = i
@eval $kernel(i) = $child(i)+1
# smoke test
job, _ = Native.create_job(eval(kernel), (Int64,))
ir = sprint(io->GPUCompiler.code_llvm(io, job))
@test contains(ir, r"add i64 %\d+, 1")
# basic redefinition
@eval $kernel(i) = $child(i)+2
job, _ = Native.create_job(eval(kernel), (Int64,))
ir = sprint(io->GPUCompiler.code_llvm(io, job))
@test contains(ir, r"add i64 %\d+, 2")
# cached_compilation interface
invocations = Ref(0)
function compiler(job)
invocations[] += 1
ir = sprint(io->GPUCompiler.code_llvm(io, job))
return ir
end
linker(job, compiled) = compiled
cache = Dict()
ft = typeof(eval(kernel))
tt = Tuple{Int64}
# initial compilation
source = methodinstance(ft, tt, Base.get_world_counter())
ir = Base.invokelatest(GPUCompiler.cached_compilation, cache, source, job.config, compiler, linker)
@test contains(ir, r"add i64 %\d+, 2")
@test invocations[] == 1
# cached compilation
ir = Base.invokelatest(GPUCompiler.cached_compilation, cache, source, job.config, compiler, linker)
@test contains(ir, r"add i64 %\d+, 2")
@test invocations[] == 1
# redefinition
@eval $kernel(i) = $child(i)+3
source = methodinstance(ft, tt, Base.get_world_counter())
ir = Base.invokelatest(GPUCompiler.cached_compilation, cache, source, job.config, compiler, linker)
@test contains(ir, r"add i64 %\d+, 3")
@test invocations[] == 2
# cached compilation
ir = Base.invokelatest(GPUCompiler.cached_compilation, cache, source, job.config, compiler, linker)
@test contains(ir, r"add i64 %\d+, 3")
@test invocations[] == 2
# redefinition of an unrelated function
@eval $unrelated(i) = 42
ir = Base.invokelatest(GPUCompiler.cached_compilation, cache, source, job.config, compiler, linker)
@test invocations[] == 2
# redefining child functions
@eval @noinline $child(i) = i+1
ir = Base.invokelatest(GPUCompiler.cached_compilation, cache, source, job.config, compiler, linker)
@test invocations[] == 3
# cached compilation
ir = Base.invokelatest(GPUCompiler.cached_compilation, cache, source, job.config, compiler, linker)
@test invocations[] == 3
# change in configuration
config = CompilerConfig(job.config; name="foobar")
ir = Base.invokelatest(GPUCompiler.cached_compilation, cache, source, config, compiler, linker)
@test invocations[] == 4
@test contains(ir, "foobar")
# tasks running in the background should keep on using the old version
c1, c2 = Condition(), Condition()
function background(job)
local_source = methodinstance(ft, tt, Base.get_world_counter())
notify(c1)
wait(c2) # wait for redefinition
GPUCompiler.cached_compilation(cache, local_source, job.config, compiler, linker)
end
t = @async Base.invokelatest(background, job)
wait(c1) # make sure the task has started
@eval $kernel(i) = $child(i)+4
source = methodinstance(ft, tt, Base.get_world_counter())
ir = Base.invokelatest(GPUCompiler.cached_compilation, cache, source, job.config, compiler, linker)
@test contains(ir, r"add i64 %\d+, 4")
notify(c2) # wake up the task
ir = fetch(t)
@test contains(ir, r"add i64 %\d+, 3")
end
end
############################################################################################
@testset "IR" begin
@testset "basic reflection" begin
valid_kernel() = return
invalid_kernel() = 1
ir = sprint(io->Native.code_llvm(io, valid_kernel, Tuple{}; optimize=false, dump_module=true))
# module should contain our function + a generic call wrapper
@test occursin(r"define\ .* void\ @.*julia_valid_kernel.*\(\)"x, ir)
@test !occursin("define %jl_value_t* @jlcall_", ir)
# there should be no debug metadata
@test !occursin("!dbg", ir)
@test Native.code_llvm(devnull, invalid_kernel, Tuple{}) == nothing
@test_throws KernelError Native.code_llvm(devnull, invalid_kernel, Tuple{}; kernel=true) == nothing
end
@testset "unbound typevars" begin
invalid_kernel() where {unbound} = return
@test_throws KernelError Native.code_llvm(devnull, invalid_kernel, Tuple{})
end
@testset "child functions" begin
# we often test using `@noinline sink` child functions, so test whether these survive
@noinline child(i) = sink(i)
parent(i) = child(i)
ir = sprint(io->Native.code_llvm(io, parent, Tuple{Int}))
@test occursin(r"call .+ @julia.+child.+", ir)
end
@testset "sysimg" begin
# bug: use a system image function
function foobar(a,i)
Base.pointerset(a, 0, mod1(i,10), 8)
end
ir = sprint(io->Native.code_llvm(io, foobar, Tuple{Ptr{Int},Int}))
@test !occursin("jlsys_", ir)
end
@testset "tracked pointers" begin
function kernel(a)
a[1] = 1
return
end
# this used to throw an LLVM assertion (#223)
Native.code_llvm(devnull, kernel, Tuple{Vector{Int}}; kernel=true)
@test "We did not crash!" != ""
end
@testset "CUDAnative.jl#278" begin
# codegen idempotency
# NOTE: this isn't fixed, but surfaces here due to bad inference of checked_sub
# NOTE: with the fix to print_to_string this doesn't error anymore,
# but we still have a test to make sure it doesn't regress
Native.code_llvm(devnull, Base.checked_sub, Tuple{Int,Int}; optimize=false)
Native.code_llvm(devnull, Base.checked_sub, Tuple{Int,Int}; optimize=false)
# breaking recursion in print_to_string makes it possible to compile
# even in the presence of the above bug
Native.code_llvm(devnull, Base.print_to_string, Tuple{Int,Int}; optimize=false)
@test "We did not crash!" != ""
end
@testset "LLVM D32593" begin
mod = @eval module $(gensym())
struct D32593_struct
foo::Float32
bar::Float32
end
D32593(ptr) = unsafe_load(ptr).foo
end
Native.code_llvm(devnull, mod.D32593, Tuple{Ptr{mod.D32593_struct}})
@test "We did not crash!" != ""
end
@testset "slow abi" begin
x = 2
f = () -> x+1
ir = sprint(io->Native.code_llvm(io, f, Tuple{}, entry_abi=:func, dump_module=true))
@test occursin(r"define nonnull {}\* @jfptr", ir) ||
occursin(r"define nonnull ptr @jfptr", ir)
@test occursin(r"define internal fastcc .+ @julia", ir)
@test occursin(r"call fastcc .+ @julia", ir)
end
@testset "function entry safepoint emission" begin
ir = sprint(io->Native.code_llvm(io, identity, Tuple{Nothing}; entry_safepoint=false, optimize=false, dump_module=true))
@test !occursin("%safepoint", ir)
ir = sprint(io->Native.code_llvm(io, identity, Tuple{Nothing}; entry_safepoint=true, optimize=false, dump_module=true))
@test occursin("%safepoint", ir)
end
@testset "always_inline" begin
# XXX: broken by JuliaLang/julia#51599, see JuliaGPU/GPUCompiler.jl#527
mod = @eval module $(gensym())
f_expensive(x) = $(foldl((e, _) -> :($sink($e) + $sink(x)), 1:100; init=:x))
function g(x)
f_expensive(x)
return
end
function h(x)
f_expensive(x)
return
end
end
ir = sprint(io->Native.code_llvm(io, mod.g, Tuple{Int64}; dump_module=true, kernel=true))
@test occursin(r"^define.*julia_f_expensive"m, ir)
ir = sprint(io->Native.code_llvm(io, mod.g, Tuple{Int64}; dump_module=true, kernel=true,
always_inline=true))
@test !occursin(r"^define.*julia_f_expensive"m, ir)
ir = sprint(io->Native.code_llvm(io, mod.h, Tuple{Int64}; dump_module=true, kernel=true,
always_inline=true))
@test !occursin(r"^define.*julia_f_expensive"m, ir)
ir = sprint(io->Native.code_llvm(io, mod.h, Tuple{Int64}; dump_module=true, kernel=true))
@test occursin(r"^define.*julia_f_expensive"m, ir)
end
@testset "function attributes" begin
@inline function convergent_barrier()
Base.llvmcall(("""
declare void @barrier() #1
define void @entry() #0 {
call void @barrier()
ret void
}
attributes #0 = { alwaysinline }
attributes #1 = { convergent }""", "entry"),
Nothing, Tuple{})
end
ir = sprint(io->Native.code_llvm(io, convergent_barrier, Tuple{}; dump_module=true, raw=true))
@test occursin(r"attributes #. = \{ convergent \}", ir)
end
end
############################################################################################
@testset "assembly" begin
@testset "basic reflection" begin
valid_kernel() = return
invalid_kernel() = 1
@test Native.code_native(devnull, valid_kernel, Tuple{}) == nothing
@test Native.code_native(devnull, invalid_kernel, Tuple{}) == nothing
@test_throws KernelError Native.code_native(devnull, invalid_kernel, Tuple{}; kernel=true)
end
@testset "idempotency" begin
# bug: generate code twice for the same kernel (jl_to_ptx wasn't idempotent)
kernel() = return
Native.code_native(devnull, kernel, Tuple{})
Native.code_native(devnull, kernel, Tuple{})
@test "We did not crash!" != ""
end
@testset "compile for host after gpu" begin
# issue #11: re-using host functions after GPU compilation
@noinline child(i) = sink(i+1)
function fromhost()
child(10)
end
function fromptx()
child(10)
return
end
Native.code_native(devnull, fromptx, Tuple{})
@test fromhost() == 11
end
end
############################################################################################
@testset "errors" begin
struct CleverType{T}
x::T
end
Base.unsafe_trunc(::Type{Int}, x::CleverType) = unsafe_trunc(Int, x.x)
@testset "non-isbits arguments" begin
foobar(i) = (sink(unsafe_trunc(Int,i)); return)
@test_throws_message(KernelError,
Native.code_execution(foobar, Tuple{BigInt})) do msg
occursin("passing and using non-bitstype argument", msg) &&
occursin("BigInt", msg)
end
# test that we get information about fields and reason why something is not isbits
@test_throws_message(KernelError,
Native.code_execution(foobar, Tuple{CleverType{BigInt}})) do msg
occursin("passing and using non-bitstype argument", msg) &&
occursin("CleverType", msg) &&
occursin("BigInt", msg)
end
end
@testset "invalid LLVM IR" begin
mod = @eval module $(gensym())
export foobar
foobar(i) = println(i)
end
@test_throws_message(InvalidIRError,
Native.code_execution(mod.foobar, Tuple{Int})) do msg
occursin("invalid LLVM IR", msg) &&
(occursin(GPUCompiler.RUNTIME_FUNCTION, msg) ||
occursin(GPUCompiler.UNKNOWN_FUNCTION, msg) ||
occursin(GPUCompiler.DYNAMIC_CALL, msg)) &&
occursin("[1] println", msg) &&
occursin("[2] foobar", msg)
end
end
@testset "invalid LLVM IR (ccall)" begin
mod = @eval module $(gensym())
export foobar
function foobar(p)
unsafe_store!(p, ccall(:time, Cint, ()))
return
end
end
@test_throws_message(InvalidIRError,
Native.code_execution(mod.foobar, Tuple{Ptr{Int}})) do msg
if VERSION >= v"1.11-"
occursin("invalid LLVM IR", msg) &&
occursin(GPUCompiler.LAZY_FUNCTION, msg) &&
occursin("call to time", msg) &&
occursin("[1] foobar", msg)
else
occursin("invalid LLVM IR", msg) &&
occursin(GPUCompiler.POINTER_FUNCTION, msg) &&
occursin("[1] foobar", msg)
end
end
end
@testset "delayed bindings" begin
mod = @eval module $(gensym())
export kernel
function kernel()
undefined
return
end
end
@test_throws_message(InvalidIRError,
Native.code_execution(mod.kernel, Tuple{})) do msg
occursin("invalid LLVM IR", msg) &&
occursin(GPUCompiler.DELAYED_BINDING, msg) &&
occursin(r"use of '.*undefined'", msg) &&
occursin("[1] kernel", msg)
end
end
@testset "dynamic call (invoke)" begin
mod = @eval module $(gensym())
@noinline nospecialize_child(@nospecialize(i)) = i
kernel(a, b) = (unsafe_store!(b, nospecialize_child(a)); return)
end
@test_throws_message(InvalidIRError,
Native.code_execution(mod.kernel, Tuple{Int,Ptr{Int}})) do msg
occursin("invalid LLVM IR", msg) &&
occursin(GPUCompiler.DYNAMIC_CALL, msg) &&
occursin("call to nospecialize_child", msg) &&
occursin("[1] kernel", msg)
end
end
@testset "dynamic call (apply)" begin
mod = @eval module $(gensym())
export func
func() = println(1)
end
@test_throws_message(InvalidIRError,
Native.code_execution(mod.func, Tuple{})) do msg
occursin("invalid LLVM IR", msg) &&
occursin(GPUCompiler.DYNAMIC_CALL, msg) &&
occursin("call to println", msg) &&
occursin("[2] func", msg)
end
end
end
############################################################################################
@testset "overrides" begin
# NOTE: method overrides do not support redefinitions, so we use different kernels
mod = @eval module $(gensym())
kernel() = child()
child() = 0
end
ir = sprint(io->Native.code_llvm(io, mod.kernel, Tuple{}))
@test occursin("ret i64 0", ir)
mod = @eval module $(gensym())
using ..GPUCompiler
Base.Experimental.@MethodTable(method_table)
kernel() = child()
child() = 0
Base.Experimental.@overlay method_table child() = 1
end
ir = sprint(io->Native.code_llvm(io, mod.kernel, Tuple{}; mod.method_table))
@test occursin("ret i64 1", ir)
end
@testset "#366: semi-concrete interpretation + overlay methods = dynamic dispatch" begin
mod = @eval module $(gensym())
using ..GPUCompiler
using StaticArrays
function kernel(width, height)
xy = SVector{2, Float32}(0.5f0, 0.5f0)
res = SVector{2, UInt32}(width, height)
floor.(UInt32, max.(0f0, xy) .* res)
return
end
Base.Experimental.@MethodTable method_table
Base.Experimental.@overlay method_table Base.isnan(x::Float32) =
(ccall("extern __nv_isnanf", llvmcall, Int32, (Cfloat,), x)) != 0
end
ir = sprint(io->Native.code_llvm(io, mod.kernel, Tuple{Int, Int};
debuginfo=:none, mod.method_table))
@test !occursin("apply_generic", ir)
@test occursin("llvm.floor", ir)
end
@testset "JuliaLang/julia#48097: kwcall inference in the presence of overlay method" begin
# XXX: broken again by JuliaLang/julia#51092, see JuliaGPU/GPUCompiler.jl#506
mod = @eval module $(gensym())
child(; kwargs...) = return
function parent()
child(; a=1f0, b=1.0)
return
end
Base.Experimental.@MethodTable method_table
Base.Experimental.@overlay method_table @noinline Core.throw_inexacterror(f::Symbol, ::Type{T}, val) where {T} = return
end
ir = sprint(io->Native.code_llvm(io, mod.parent, Tuple{};
debuginfo=:none, mod.method_table))
@test occursin("ret void", ir)
@test !any(f->occursin(f, ir),
["jl_invoke", "apply_iterate",
"inttoptr", "apply_type"])
end
end # testitem
@testitem "native precompile" setup=[Precompile,] begin
using Test
precompile_test_harness("Inference caching") do load_path
# Write out the Native test setup as a micro package
create_standalone(load_path, "NativeCompiler", "native_testsetup.jl")
write(joinpath(load_path, "InferenceCaching.jl"), :(module InferenceCaching
import NativeCompiler
import GPUCompiler
using PrecompileTools
function kernel(A, x)
A[1] = x
return
end
let
job, _ = NativeCompiler.create_job(kernel, (Vector{Int}, Int))
precompile(job)
end
# identity is foreign
@setup_workload begin
job, _ = NativeCompiler.create_job(identity, (Int,))
@compile_workload begin
precompile(job)
end
end
end) |> string)
Base.compilecache(Base.PkgId("InferenceCaching"))
@eval let
import NativeCompiler
# Check that no cached entry is present
identity_mi = GPUCompiler.methodinstance(typeof(identity), Tuple{Int})
token = let
job, _ = NativeCompiler.create_job(identity, (Int,))
GPUCompiler.ci_cache_token(job)
end
@test !check_presence(identity_mi, token)
using InferenceCaching
# Check that kernel survived
kernel_mi = GPUCompiler.methodinstance(typeof(InferenceCaching.kernel), Tuple{Vector{Int}, Int})
@test check_presence(kernel_mi, token)
# check that identity survived
@test check_presence(identity_mi, token)
GPUCompiler.clear_disk_cache!()
@test GPUCompiler.disk_cache_enabled() == false
GPUCompiler.enable_disk_cache!()
@test GPUCompiler.disk_cache_enabled() == true
job, _ = NativeCompiler.create_job(InferenceCaching.kernel, (Vector{Int}, Int))
@assert job.source == kernel_mi
ci = GPUCompiler.ci_cache_lookup(GPUCompiler.ci_cache(job), job.source, job.world, job.world)
@assert ci !== nothing
@assert ci.inferred !== nothing
path = GPUCompiler.cache_file(ci, job.config)
@test path !== nothing
@test !ispath(path)
NativeCompiler.cached_execution(InferenceCaching.kernel, (Vector{Int}, Int))
@test ispath(path)
GPUCompiler.clear_disk_cache!()
@test !ispath(path)
end
end
############################################################################################
end
@testsetup module Native
using GPUCompiler
# create a native test compiler, and generate reflection methods for it
include("runtime.jl")
# local method table for device functions
Base.Experimental.@MethodTable(test_method_table)
struct CompilerParams <: AbstractCompilerParams
entry_safepoint::Bool
method_table
CompilerParams(entry_safepoint::Bool=false, method_table=test_method_table) =
new(entry_safepoint, method_table)
end
NativeCompilerJob = CompilerJob{NativeCompilerTarget,CompilerParams}
GPUCompiler.runtime_module(::NativeCompilerJob) = TestRuntime
GPUCompiler.method_table(@nospecialize(job::NativeCompilerJob)) = job.config.params.method_table
GPUCompiler.can_safepoint(@nospecialize(job::NativeCompilerJob)) = job.config.params.entry_safepoint
function create_job(@nospecialize(func), @nospecialize(types); kernel::Bool=false,
entry_abi=:specfunc, entry_safepoint::Bool=false, always_inline=false,
method_table=test_method_table, kwargs...)
source = methodinstance(typeof(func), Base.to_tuple_type(types), Base.get_world_counter())
target = NativeCompilerTarget()
params = CompilerParams(entry_safepoint, method_table)
config = CompilerConfig(target, params; kernel, entry_abi, always_inline)
CompilerJob(source, config), kwargs
end
function code_typed(@nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_typed(job; kwargs...)
end
function code_warntype(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_warntype(io, job; kwargs...)
end
function code_llvm(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_llvm(io, job; kwargs...)
end
function code_native(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_native(io, job; kwargs...)
end
# aliases without ::IO argument
for method in (:code_warntype, :code_llvm, :code_native)
@eval begin
$method(@nospecialize(func), @nospecialize(types); kwargs...) =
$method(stdout, func, types; kwargs...)
end
end
# simulates codegen for a kernel function: validates by default
function code_execution(@nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kernel=true, kwargs...)
JuliaContext() do ctx
GPUCompiler.compile(:asm, job; kwargs...)
end
end
const runtime_cache = Dict{Any, Any}()
function compiler(job)
JuliaContext() do ctx
GPUCompiler.compile(:asm, job, validate=false)
end
end
function linker(job, asm)
asm
end
# simulates cached codegen
function cached_execution(@nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.cached_compilation(runtime_cache, job.source, job.config, compiler, linker)
end
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 1819 | @testsetup module Precompile
using Test
using ReTestItems
export precompile_test_harness, check_presence, create_standalone
function precompile_test_harness(@nospecialize(f), testset::String)
@testset "$testset" begin
precompile_test_harness(f, true)
end
end
function precompile_test_harness(@nospecialize(f), separate::Bool)
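# runs `f` with a fresh package load path (and, if `separate`, a separate compile-cache depot),
# then restores LOAD_PATH/DEPOT_PATH and removes the temporary directories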
load_path = mktempdir()
load_cache_path = separate ? mktempdir() : load_path
try
pushfirst!(LOAD_PATH, load_path)
pushfirst!(DEPOT_PATH, load_cache_path)
f(load_path)
finally
try
rm(load_path, force=true, recursive=true)
catch err
@show err
end
if separate
try
rm(load_cache_path, force=true, recursive=true)
catch err
@show err
end
end
filter!((≠)(load_path), LOAD_PATH)
separate && filter!((≠)(load_cache_path), DEPOT_PATH)
end
nothing
end
function check_presence(mi, token)
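# walk the linked list of CodeInstances attached to the MethodInstance, looking for
# an entry owned by `token` that is still valid (max_world == typemax(UInt))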
found = false
ci = isdefined(mi, :cache) ? mi.cache : nothing
while ci !== nothing
if ci.owner === token && ci.max_world == typemax(UInt)
found = true
break
end
ci = isdefined(ci, :next) ? ci.next : nothing
end
return found
end
function create_standalone(load_path, name::String, file)
cp(joinpath(@__DIR__, "runtime.jl"), joinpath(load_path, "runtime.jl"), force=true)
TS = include(file)
code = TS.code
if code.head == :begin
code.head = :block
end
@assert code.head == :block
code = Expr(:module, true, Symbol(name), code)
# Write out the test setup as a micro package
write(joinpath(load_path, "$name.jl"), string(code))
Base.compilecache(Base.PkgId(name))
end
end # testsetup
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 12591 | @testitem "PTX" setup=[PTX, Helpers] begin
using LLVM
############################################################################################
@testset "IR" begin
@testset "exceptions" begin
foobar() = throw(DivideError())
ir = sprint(io->PTX.code_llvm(io, foobar, Tuple{}))
# plain exceptions should get lowered to a call to the GPU run-time
@test occursin("gpu_report_exception", ir)
# not a jl_throw referencing a jl_value_t representing the exception
@test !occursin("jl_throw", ir)
end
@testset "kernel functions" begin
@testset "kernel argument attributes" begin
mod = @eval module $(gensym())
kernel(x) = return
struct Aggregate
x::Int
end
end
ir = sprint(io->PTX.code_llvm(io, mod.kernel, Tuple{mod.Aggregate}))
@test occursin(r"@julia_kernel\w*\(({ i64 }|\[1 x i64\])\* ", ir) ||
occursin(r"@julia_kernel\w*\(ptr ", ir)
ir = sprint(io->PTX.code_llvm(io, mod.kernel, Tuple{mod.Aggregate}; kernel=true))
@test occursin(r"@_Z6kernel9Aggregate\(.*({ i64 }|\[1 x i64\]) ", ir)
end
@testset "property_annotations" begin
kernel() = return
ir = sprint(io->PTX.code_llvm(io, kernel, Tuple{}; dump_module=true))
@test !occursin("nvvm.annotations", ir)
ir = sprint(io->PTX.code_llvm(io, kernel, Tuple{};
dump_module=true, kernel=true))
@test occursin("nvvm.annotations", ir)
@test !occursin("maxntid", ir)
@test !occursin("reqntid", ir)
@test !occursin("minctasm", ir)
@test !occursin("maxnreg", ir)
ir = sprint(io->PTX.code_llvm(io, kernel, Tuple{};
dump_module=true, kernel=true, maxthreads=42))
@test occursin("maxntidx\", i32 42", ir)
@test occursin("maxntidy\", i32 1", ir)
@test occursin("maxntidz\", i32 1", ir)
ir = sprint(io->PTX.code_llvm(io, kernel, Tuple{};
dump_module=true, kernel=true, minthreads=42))
@test occursin("reqntidx\", i32 42", ir)
@test occursin("reqntidy\", i32 1", ir)
@test occursin("reqntidz\", i32 1", ir)
ir = sprint(io->PTX.code_llvm(io, kernel, Tuple{};
dump_module=true, kernel=true, blocks_per_sm=42))
@test occursin("minctasm\", i32 42", ir)
ir = sprint(io->PTX.code_llvm(io, kernel, Tuple{};
dump_module=true, kernel=true, maxregs=42))
@test occursin("maxnreg\", i32 42", ir)
end
LLVM.version() >= v"8" && @testset "calling convention" begin
kernel() = return
ir = sprint(io->PTX.code_llvm(io, kernel, Tuple{}; dump_module=true))
@test !occursin("ptx_kernel", ir)
ir = sprint(io->PTX.code_llvm(io, kernel, Tuple{};
dump_module=true, kernel=true))
@test occursin("ptx_kernel", ir)
end
@testset "kernel state" begin
# state should be passed by value to kernel functions
mod = @eval module $(gensym())
export kernel
kernel() = return
end
ir = sprint(io->PTX.code_llvm(io, mod.kernel, Tuple{}))
@test occursin(r"@julia_kernel\w*\(\)", ir)
ir = sprint(io->PTX.code_llvm(io, mod.kernel, Tuple{}; kernel=true))
@test occursin("@_Z6kernel([1 x i64] %state)", ir)
# state should only be passed to device functions that use it
mod = @eval module $(gensym())
@noinline child1(ptr) = unsafe_load(ptr)
@noinline function child2()
data = $PTX.kernel_state().data
ptr = reinterpret(Ptr{Int}, data)
unsafe_load(ptr)
end
function kernel(ptr)
unsafe_store!(ptr, child1(ptr) + child2())
return
end
end
ir = sprint(io->PTX.code_llvm(io, mod.kernel, Tuple{Ptr{Int64}};
kernel=true, dump_module=true))
# kernel should take state argument before all else
@test occursin(r"@_Z6kernelP5Int64\(\[1 x i64\] %state", ir)
# child1 doesn't use the state
@test occursin(r"@julia_child1\w*\((i64|i8\*|ptr)", ir)
# child2 does
@test occursin(r"@julia_child2\w*\(\[1 x i64\] %state", ir)
# can't have the unlowered intrinsic
@test !occursin("julia.gpu.state_getter", ir)
end
end
end
############################################################################################
@testset "assembly" begin
@testset "child functions" begin
# we often test using @noinline child functions, so test whether these survive
# (despite not having side-effects)
mod = @eval module $(gensym())
import ..sink
export child, parent
@noinline child(i) = sink(i)
function parent(i)
child(i)
return
end
end
asm = sprint(io->PTX.code_native(io, mod.parent, Tuple{Int64}))
@test occursin(r"call.uni\s+julia_child_"m, asm)
end
@testset "kernel functions" begin
mod = @eval module $(gensym())
import ..sink
export nonentry, entry
@noinline nonentry(i) = sink(i)
function entry(i)
nonentry(i)
return
end
end
asm = sprint(io->PTX.code_native(io, mod.entry, Tuple{Int64};
kernel=true, dump_module=true))
@test occursin(".visible .entry _Z5entry5Int64", asm)
@test !occursin(".visible .func julia_nonentry", asm)
@test occursin(".func julia_nonentry", asm)
@testset "property_annotations" begin
asm = sprint(io->PTX.code_native(io, mod.entry, Tuple{Int64}; kernel=true))
@test !occursin("maxntid", asm)
asm = sprint(io->PTX.code_native(io, mod.entry, Tuple{Int64};
kernel=true, maxthreads=42))
@test occursin(".maxntid 42, 1, 1", asm)
asm = sprint(io->PTX.code_native(io, mod.entry, Tuple{Int64};
kernel=true, minthreads=42))
@test occursin(".reqntid 42, 1, 1", asm)
asm = sprint(io->PTX.code_native(io, mod.entry, Tuple{Int64};
kernel=true, blocks_per_sm=42))
@test occursin(".minnctapersm 42", asm)
if LLVM.version() >= v"4.0"
asm = sprint(io->PTX.code_native(io, mod.entry, Tuple{Int64};
kernel=true, maxregs=42))
@test occursin(".maxnreg 42", asm)
end
end
end
@testset "child function reuse" begin
# bug: depending on a child function from multiple parents resulted in
# the child only being present once
mod = @eval module $(gensym())
import ..sink
export child, parent1, parent2
@noinline child(i) = sink(i)
function parent1(i)
child(i)
return
end
function parent2(i)
child(i+1)
return
end
end
asm = sprint(io->PTX.code_native(io, mod.parent1, Tuple{Int}))
@test occursin(".func julia_child_", asm)
asm = sprint(io->PTX.code_native(io, mod.parent2, Tuple{Int}))
@test occursin(".func julia_child_", asm)
end
@testset "child function reuse bis" begin
# bug: similar to the above, but a slightly different issue
# in the case of two child functions
mod = @eval module $(gensym())
import ..sink
export parent1, parent2, child1, child2
@noinline child1(i) = sink(i)
@noinline child2(i) = sink(i+1)
function parent1(i)
child1(i) + child2(i)
return
end
function parent2(i)
child1(i+1) + child2(i+1)
return
end
end
asm = sprint(io->PTX.code_native(io, mod.parent1, Tuple{Int}))
@test occursin(".func julia_child1_", asm)
@test occursin(".func julia_child2_", asm)
asm = sprint(io->PTX.code_native(io, mod.parent2, Tuple{Int}))
@test occursin(".func julia_child1_", asm)
@test occursin(".func julia_child2_", asm)
end
@testset "indirect sysimg function use" begin
# issue #9: re-using sysimg functions should force recompilation
# (host fldmod1->mod1 throws, so the PTX code shouldn't contain a throw)
# NOTE: Int32 to test for #49
function kernel(out)
wid, lane = fldmod1(unsafe_load(out), Int32(32))
unsafe_store!(out, wid)
return
end
asm = sprint(io->PTX.code_native(io, kernel, Tuple{Ptr{Int32}}))
@test !occursin("jl_throw", asm)
@test !occursin("jl_invoke", asm) # forced recompilation should still not invoke
end
@testset "LLVM intrinsics" begin
# issue #13 (a): cannot select trunc
function kernel(x)
unsafe_trunc(Int, x)
return
end
PTX.code_native(devnull, kernel, Tuple{Float64})
@test "We did not crash!" != ""
end
@testset "exception arguments" begin
function kernel(a)
unsafe_store!(a, trunc(Int, unsafe_load(a)))
return
end
PTX.code_native(devnull, kernel, Tuple{Ptr{Float64}})
@test "We did not crash!" != ""
end
@testset "GC and TLS lowering" begin
mod = @eval module $(gensym())
import ..sink
mutable struct PleaseAllocate
y::Csize_t
end
# common pattern in Julia 0.7: outlined throw to avoid a GC frame in the calling code
@noinline function inner(x)
sink(x.y)
nothing
end
function kernel(i)
inner(PleaseAllocate(Csize_t(42)))
nothing
end
end
asm = sprint(io->PTX.code_native(io, mod.kernel, Tuple{Int}))
@test occursin("gpu_gc_pool_alloc", asm)
@test !occursin("julia.push_gc_frame", asm)
@test !occursin("julia.pop_gc_frame", asm)
@test !occursin("julia.get_gc_frame_slot", asm)
@test !occursin("julia.new_gc_frame", asm)
# make sure that we can still elide allocations
function ref_kernel(ptr, i)
data = Ref{Int64}()
data[] = 0
if i > 1
data[] = 1
else
data[] = 2
end
unsafe_store!(ptr, data[], i)
return nothing
end
asm = sprint(io->PTX.code_native(io, ref_kernel, Tuple{Ptr{Int64}, Int}))
@test !occursin("gpu_gc_pool_alloc", asm)
end
@testset "float boxes" begin
function kernel(a,b)
c = Int32(a)
# the conversion to Int32 may fail, in which case the input Float32 is boxed in order to
# pass it to the @nospecialize exception constructor. we should really avoid that (eg.
# by avoiding @nospecialize, or optimize the unused arguments away), but for now the box
# should just work.
unsafe_store!(b, c)
return
end
ir = sprint(io->PTX.code_llvm(io, kernel, Tuple{Float32,Ptr{Float32}}))
@test occursin("jl_box_float32", ir)
PTX.code_native(devnull, kernel, Tuple{Float32,Ptr{Float32}})
end
end
end # testitem
@testitem "PTX precompile" setup=[Precompile,] begin
precompile_test_harness("Inference caching") do load_path
# Write out the PTX test setup as a micro package
create_standalone(load_path, "PTXCompiler", "ptx_testsetup.jl")
write(joinpath(load_path, "InferenceCaching.jl"), :(module InferenceCaching
import PTXCompiler
import GPUCompiler
using PrecompileTools
function kernel()
return
end
let
job, _ = PTXCompiler.create_job(kernel, ())
precompile(job)
end
# identity is foreign
@setup_workload begin
job, _ = PTXCompiler.create_job(identity, (Int,))
@compile_workload begin
precompile(job)
end
end
end) |> string)
Base.compilecache(Base.PkgId("InferenceCaching"))
@eval let
import PTXCompiler
# Check that no cached entry is present
identity_mi = GPUCompiler.methodinstance(typeof(identity), Tuple{Int})
token = let
job, _ = PTXCompiler.create_job(identity, (Int,))
GPUCompiler.ci_cache_token(job)
end
ci = isdefined(identity_mi, :cache) ? identity_mi.cache : nothing
while ci !== nothing
@test ci.owner !== token
ci = isdefined(ci, :next) ? ci.next : nothing
end
using InferenceCaching
# Check that kernel survived
kernel_mi = GPUCompiler.methodinstance(typeof(InferenceCaching.kernel), Tuple{})
@test check_presence(kernel_mi, token)
# check that identity survived
@test check_presence(identity_mi, token)
end
end
############################################################################################
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 2906 | @testsetup module PTX
using GPUCompiler
# create a PTX-based test compiler, and generate reflection methods for it
include("runtime.jl")
struct CompilerParams <: AbstractCompilerParams end
PTXCompilerJob = CompilerJob{PTXCompilerTarget,CompilerParams}
struct PTXKernelState
data::Int64
end
GPUCompiler.kernel_state_type(@nospecialize(job::PTXCompilerJob)) = PTXKernelState
@inline @generated kernel_state() = GPUCompiler.kernel_state_value(PTXKernelState)
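# kernel_state() is a generated function that embeds GPUCompiler's kernel-state intrinsic
# (`julia.gpu.state_getter`); the kernel-state lowering passes later replace that intrinsic
# with the explicit state argument, which the PTX "kernel state" tests verify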
# a version of the test runtime that has some side effects, loading the kernel state
# (so that we can test if kernel state arguments are appropriately optimized away)
module PTXTestRuntime
using ..GPUCompiler
import ..PTXKernelState
function signal_exception()
kernel_state()
return
end
# dummy methods
# HACK: if malloc returns 0 or traps, all calling functions (like jl_box_*)
# get reduced to a trap, which really messes with our test suite.
malloc(sz) = Ptr{Cvoid}(Int(0xDEADBEEF))
report_oom(sz) = return
report_exception(ex) = return
report_exception_name(ex) = return
report_exception_frame(idx, func, file, line) = return
end
GPUCompiler.runtime_module(::PTXCompilerJob) = PTXTestRuntime
function create_job(@nospecialize(func), @nospecialize(types); kernel::Bool=false,
minthreads=nothing, maxthreads=nothing, blocks_per_sm=nothing,
maxregs=nothing, always_inline=false, kwargs...)
source = methodinstance(typeof(func), Base.to_tuple_type(types), Base.get_world_counter())
target = PTXCompilerTarget(;cap=v"7.0",
minthreads, maxthreads,
blocks_per_sm, maxregs)
params = CompilerParams()
config = CompilerConfig(target, params; kernel, always_inline)
CompilerJob(source, config), kwargs
end
function code_typed(@nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_typed(job; kwargs...)
end
function code_warntype(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_warntype(io, job; kwargs...)
end
function code_llvm(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_llvm(io, job; kwargs...)
end
function code_native(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_native(io, job; kwargs...)
end
# simulates codegen for a kernel function: validates by default
function code_execution(@nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kernel=true, kwargs...)
JuliaContext() do ctx
GPUCompiler.compile(:asm, job; kwargs...)
end
end
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 1267 | using GPUCompiler, LLVM
GPUCompiler.reset_runtime()
using InteractiveUtils
@info "System information:\n" * sprint(io->versioninfo(io; verbose=true))
import SPIRV_LLVM_Translator_unified_jll
import SPIRV_Tools_jll
using ReTestItems
runtests(GPUCompiler; nworkers=min(Sys.CPU_THREADS,4), nworker_threads=1,
testitem_timeout=120) do ti
if ti.name == "GCN" && LLVM.is_asserts()
# XXX: GCN's non-0 stack address space triggers LLVM assertions due to Julia bugs
return false
end
@dispose ctx=Context() begin
# XXX: some back-ends do not support opaque pointers
if ti.name in ["Metal"] && !supports_typed_pointers(ctx)
return false
end
end
if ti.name in ["PTX", "GCN", "PTX precompile"] && Sys.isapple()
# support for AMDGPU and NVPTX on macOS has been removed from Julia's LLVM build
return false
end
if ti.name in ["SPIRV"] && !(SPIRV_LLVM_Translator_unified_jll.is_available() && SPIRV_Tools_jll.is_available())
# SPIRV needs its tools to be available
return false
end
if ti.name in ["PTX precompile", "native precompile"] && VERSION < v"1.11-"
# precompile needs v1.11
return false
end
true
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 259 | module TestRuntime
# dummy methods
signal_exception() = return
malloc(sz) = C_NULL
report_oom(sz) = return
report_exception(ex) = return
report_exception_name(ex) = return
report_exception_frame(idx, func, file, line) = return
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 3218 | @testitem "SPIRV" setup=[SPIRV, Helpers] begin
using SPIRV_LLVM_Translator_unified_jll, SPIRV_Tools_jll
############################################################################################
@testset "IR" begin
@testset "kernel functions" begin
@testset "calling convention" begin
kernel() = return
ir = sprint(io->SPIRV.code_llvm(io, kernel, Tuple{}; dump_module=true))
@test !occursin("spir_kernel", ir)
ir = sprint(io->SPIRV.code_llvm(io, kernel, Tuple{};
dump_module=true, kernel=true))
@test occursin("spir_kernel", ir)
end
@testset "byval workaround" begin
mod = @eval module $(gensym())
export kernel
kernel(x) = return
end
ir = sprint(io->SPIRV.code_llvm(io, mod.kernel, Tuple{Tuple{Int}}))
@test occursin(r"@\w*kernel\w*\(({ i64 }|\[1 x i64\])\*", ir) ||
occursin(r"@\w*kernel\w*\(ptr", ir)
ir = sprint(io->SPIRV.code_llvm(io, mod.kernel, Tuple{Tuple{Int}}; kernel=true))
@test occursin(r"@\w*kernel\w*\(.*{ ({ i64 }|\[1 x i64\]) }\*.+byval", ir) ||
occursin(r"@\w*kernel\w*\(ptr byval", ir)
end
@testset "byval bug" begin
# byval added alwaysinline, which could conflict with noinline and fail verification
@noinline kernel() = return
SPIRV.code_llvm(devnull, kernel, Tuple{}; kernel=true)
@test "We did not crash!" != ""
end
end
@testset "unsupported type detection" begin
mod = @eval module $(gensym())
export kernel
function kernel(ptr, val)
unsafe_store!(ptr, val)
return
end
end
ir = sprint(io->SPIRV.code_llvm(io, mod.kernel, Tuple{Ptr{Float16}, Float16}; validate=true))
@test occursin("store half", ir)
ir = sprint(io->SPIRV.code_llvm(io, mod.kernel, Tuple{Ptr{Float32}, Float32}; validate=true))
@test occursin("store float", ir)
ir = sprint(io->SPIRV.code_llvm(io, mod.kernel, Tuple{Ptr{Float64}, Float64}; validate=true))
@test occursin("store double", ir)
@test_throws_message(InvalidIRError,
SPIRV.code_llvm(devnull, mod.kernel, Tuple{Ptr{Float16}, Float16};
supports_fp16=false, validate=true)) do msg
occursin("unsupported use of half value", msg) &&
occursin("[1] unsafe_store!", msg) &&
occursin("[2] kernel", msg)
end
@test_throws_message(InvalidIRError,
SPIRV.code_llvm(devnull, mod.kernel, Tuple{Ptr{Float64}, Float64};
supports_fp64=false, validate=true)) do msg
occursin("unsupported use of double value", msg) &&
occursin("[1] unsafe_store!", msg) &&
occursin("[2] kernel", msg)
end
end
end
############################################################################################
@testset "asm" begin
@testset "trap removal" begin
function kernel(x)
x && error()
return
end
asm = sprint(io->SPIRV.code_native(io, kernel, Tuple{Bool}; kernel=true))
@test occursin(r"OpFunctionCall %void %julia_error", asm)
end
end
############################################################################################
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 1814 | @testsetup module SPIRV
using GPUCompiler
# create a SPIRV-based test compiler, and generate reflection methods for it
include("runtime.jl")
struct CompilerParams <: AbstractCompilerParams end
GPUCompiler.runtime_module(::CompilerJob{<:Any,CompilerParams}) = TestRuntime
function create_job(@nospecialize(func), @nospecialize(types);
kernel::Bool=false, always_inline=false,
supports_fp16=true, supports_fp64=true, kwargs...)
source = methodinstance(typeof(func), Base.to_tuple_type(types), Base.get_world_counter())
target = SPIRVCompilerTarget(; supports_fp16, supports_fp64)
params = CompilerParams()
config = CompilerConfig(target, params; kernel, always_inline)
CompilerJob(source, config), kwargs
end
function code_typed(@nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_typed(job; kwargs...)
end
function code_warntype(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_warntype(io, job; kwargs...)
end
function code_llvm(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_llvm(io, job; kwargs...)
end
function code_native(io::IO, @nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kwargs...)
GPUCompiler.code_native(io, job; kwargs...)
end
# simulates codegen for a kernel function: validates by default
function code_execution(@nospecialize(func), @nospecialize(types); kwargs...)
job, kwargs = create_job(func, types; kernel=true, kwargs...)
JuliaContext() do ctx
GPUCompiler.compile(:asm, job; kwargs...)
end
end
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 2525 | @testitem "util" begin
@testset "split_kwargs" begin
kwargs = [:(a=1), :(b=2), :(c=3), :(d=4)]
groups = GPUCompiler.split_kwargs(kwargs, [:a], [:b, :c])
@test length(groups) == 3
@test groups[1] == [:(a=1)]
@test groups[2] == [:(b=2), :(c=3)]
@test groups[3] == [:(d=4)]
end
@testset "mangle" begin
struct XX{T} end
# values checked with c++filt / cu++filt
@test GPUCompiler.mangle_sig(Tuple{typeof(sin), XX{false}}) == "_Z3sin2XXILb0EE" # "sin(XX<false>)"
@test GPUCompiler.mangle_sig(Tuple{typeof(sin), XX{true}}) == "_Z3sin2XXILb1EE" # "sin(XX<true>)"
@test GPUCompiler.mangle_sig(Tuple{typeof(sin), XX{Cshort(10)}}) == "_Z3sin2XXILs10EE" # "sin(XX<(short)10>)"
@test GPUCompiler.mangle_sig(Tuple{typeof(sin), XX{Cshort(0)}}) == "_Z3sin2XXILs0EE" # "sin(XX<(short)l>)"
@test GPUCompiler.mangle_sig(Tuple{typeof(sin), XX{Cshort(-10)}}) == "_Z3sin2XXILsn10EE" # "sin(XX<(short)-10>)"
end
@testset "safe loggers" begin
using Logging: Logging
struct YieldingLogger <: Logging.AbstractLogger
logger::Logging.AbstractLogger
YieldingLogger() = new(Logging.current_logger())
end
function Logging.handle_message(logger::YieldingLogger, args...)
yield()
return Logging.handle_message(logger.logger, args...)
end
Logging.shouldlog(::YieldingLogger, ::Any...) = true
Logging.min_enabled_level(::YieldingLogger) = Logging.Debug
GPUCompiler.@locked function f()
GPUCompiler.@safe_debug "safe_debug"
GPUCompiler.@safe_info "safe_info"
GPUCompiler.@safe_warn "safe_warn"
GPUCompiler.@safe_error "safe_error"
GPUCompiler.@safe_show "safe_show"
end
@test begin
@sync begin
Threads.@spawn begin
sleep(0.1)
@debug "debug"
sleep(0.1)
@info "info"
sleep(0.1)
@warn "warn"
sleep(0.1)
@error "error"
sleep(0.1)
@show "show"
sleep(0.1)
end
pipe = Pipe()
Base.link_pipe!(pipe; reader_supports_async=true, writer_supports_async=true)
Threads.@spawn print(stdout, read(pipe, String))
Threads.@spawn Logging.with_logger(YieldingLogger()) do
sleep(0.1)
redirect_stdout(f, pipe)
close(pipe)
end
end
true
end
end
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | docs | 1517 | # GPUCompiler.jl
*Reusable compiler infrastructure for Julia GPU backends.*
| **Build Status** | **Coverage** |
|:--------------------------------------------------------------------------------------------------:|:-------------------------------:|
| [![][buildkite-img]][buildkite-url] [![][gha-img]][gha-url] [![PkgEval][pkgeval-img]][pkgeval-url] | [![][codecov-img]][codecov-url] |
[buildkite-img]: https://badge.buildkite.com/512eb7dd35ca5b427ddf3240e2b4b3022f0c4f9925f1bdafa8.svg?branch=master
[buildkite-url]: https://buildkite.com/julialang/gpucompiler-dot-jl
[gha-img]: https://github.com/JuliaGPU/GPUCompiler.jl/workflows/CI/badge.svg?branch=master
[gha-url]: https://github.com/JuliaGPU/GPUCompiler.jl/actions?query=workflow%3ACI
[pkgeval-img]: https://juliaci.github.io/NanosoldierReports/pkgeval_badges/G/GPUCompiler.svg
[pkgeval-url]: https://juliaci.github.io/NanosoldierReports/pkgeval_badges/G/GPUCompiler.html
[codecov-img]: https://codecov.io/gh/JuliaGPU/GPUCompiler.jl/branch/master/graph/badge.svg
[codecov-url]: https://codecov.io/gh/JuliaGPU/GPUCompiler.jl
This package offers reusable compiler infrastructure and tooling for
implementing GPU compilers in Julia. **It is not intended for end users!**
Instead, you should use one of the packages that builds on GPUCompiler.jl, such
as [CUDA.jl](https://github.com/JuliaGPU/CUDA.jl) or [AMDGPU.jl](https://github.com/JuliaGPU/AMDGPU.jl).
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.3.0 | bd11a5492a2b50db534baec5826029e5f80a4445 | code | 2856 | using Distributions, DataFrames, DataFramesMeta, Arrow, Statistics, Dates, CSV, ODBC, WRDSMerger
using Revise
using AbnormalReturns
##
conn = ODBC.Connection("wrds-pgdata-64")
##
df_ff_data = @chain ff_data(conn) begin
@rtransform(:mkt = :mktrf + :rf)
end
ff_data_max = maximum(df_ff_data.date)
@time df_crsp_raw = @chain crsp_data(conn, Date(1990), ff_data_max; cols=["ret"]) begin
sort([:permno, :date])
end
data = MarketData(
df_ff_data,
df_crsp_raw
)
##
mkt_mat = @chain df_ff_data begin
select([:mktrf, :smb, :hml, :umd])
dropmissing
Matrix
cov
end
# 4×4 Matrix{Float64}:
# 0.000116248 -1.03659e-5 1.1843e-5 -1.04678e-5
# -1.03659e-5 3.49562e-5 -2.37018e-6 1.9103e-6
# 1.1843e-5 -2.37018e-6 3.77871e-5 -1.05367e-5
# -1.04678e-5 1.9103e-6 -1.05367e-5 5.98879e-5
##
@time df_all_regs = @chain df_crsp_raw begin
select([:permno, :date])
dropmissing
@by(
:permno,
:date_start = minimum(:date),
:date_end = maximum(:date)
)
@transform(:reg = AbnormalReturns.vector_reg(data, :permno, :date_start, :date_end, @formula(ret ~ mktrf + smb + hml + umd); minobs=100, save_residuals=true))
end
##
# winsorize: clamp values below the low_per quantile up to it, and values above the
# high_per quantile down to it (outliers are capped rather than dropped)
function winsorize(vect::AbstractArray, low_per::AbstractFloat=0.01, high_per::AbstractFloat=0.99)
    lower = quantile(skipmissing(vect), low_per)
    upper = quantile(skipmissing(vect), high_per)
    returnVect = vect[:]
    for (i, v) in enumerate(vect)
        if ismissing(v)
            continue
        end
        if v > upper
            returnVect[i] = upper
        elseif v < lower
            returnVect[i] = lower
        end
    end
    return returnVect
end
df_coefs = @chain df_all_regs begin
@rtransform(:r2 = r2(:reg))
dropmissing
@rtransform(:coefs = NamedTuple{(Symbol.(coefnames(:reg))..., ^(:var))}((coef(:reg)..., var(:reg))))
_.coefs
DataFrame
rename("(Intercept)" => "int")
@transform(
:int = winsorize(:int),
:mktrf = winsorize(:mktrf),
:smb = winsorize(:smb),
:hml = winsorize(:hml),
:umd = winsorize(:umd),
:var = winsorize(:var)
)
end
##
coef_means = mean(df_coefs[:, 1:5] |> Matrix, dims=1)[1, :]
# [
# 0.0003416652538979692,
# 0.7557301603393168,
# 0.6035365140401592,
# 0.13841355976862427,
# -0.0917780093857371,
# ]
coef_cov = cov(df_coefs[:, 1:5] |> Matrix)
# 2.52326e-6 -4.71181e-5 3.31182e-5 -7.05013e-5 1.32415e-5
# -4.71181e-5 0.27552 0.173033 0.0742338 -0.0448335
# 3.31182e-5 0.173033 0.389159 0.0460528 -0.0280572
# -7.05013e-5 0.0742338 0.0460528 0.40226 -0.000262376
# 1.32415e-5 -0.0448335 -0.0280572 -0.000262376 0.183439
fit_mle(Gamma, @rsubset(df_coefs, :var > 0).var)
# Gamma{Float64}(α=0.5792772418365709, θ=0.00448646291687775) | AbnormalReturns | https://github.com/junder873/AbnormalReturns.jl.git |
|
[
"MIT"
] | 0.3.0 | bd11a5492a2b50db534baec5826029e5f80a4445 | code | 2602 | using Distributions, DataFrames, DataFramesMeta, Statistics, BusinessDays, Dates, CSV, Arrow
##
BusinessDays.initcache(:USNYSE)
##
dates = listbdays(:USNYSE, Date(1980), Date(2020))
mkt_mat = [
0.000116248 -1.03659e-5 1.1843e-5 -1.04678e-5;
-1.03659e-5 3.49562e-5 -2.37018e-6 1.9103e-6;
1.1843e-5 -2.37018e-6 3.77871e-5 -1.05367e-5;
-1.04678e-5 1.9103e-6 -1.05367e-5 5.98879e-5;
]
df_mkt_benchmark = DataFrame(
rand(MvNormal(mkt_mat), length(dates))' |> Matrix, [:mkt, :smb, :hml, :umd])
df_mkt_benchmark[!, :date] = dates
select!(df_mkt_benchmark, [:date, :mkt, :smb, :hml, :umd])
##
count = 10000
coef_means = [
0.0003416652538979692,
0.7557301603393168,
0.6035365140401592,
0.13841355976862427,
-0.0917780093857371,
]
coef_cov = [
2.52326e-6 -4.71181e-5 3.31182e-5 -7.05013e-5 1.32415e-5;
-4.71181e-5 0.27552 0.173033 0.0742338 -0.0448335;
3.31182e-5 0.173033 0.389159 0.0460528 -0.0280572;
-7.05013e-5 0.0742338 0.0460528 0.40226 -0.000262376;
1.32415e-5 -0.0448335 -0.0280572 -0.000262376 0.183439;
]
d = Gamma(0.5792772418365709, 0.00448646291687775)
df_firm_resp = DataFrame(hcat(
1:count |> collect,
rand(MvNormal(coef_means, coef_cov), count)' |> Matrix,
rand(d, count)
), [:firm_id, :int, :mkt, :smb, :hml, :umd, :var]
)
@transform!(df_firm_resp, :firm_id = Int.(:firm_id))
##
main_mkt_mat = df_mkt_benchmark[:, [:mkt, :smb, :hml, :umd]] |> Matrix
firm_mats = collect.(Tuple.(Tables.rowtable(df_firm_resp[:, [:mkt, :smb, :hml, :umd]])))
firm_errors = rand.(Normal.(0, df_firm_resp.var), nrow(df_mkt_benchmark))
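# the simulated firm returns below follow the factor model ret_i = X * β_i + ε_i, where X is
# the market factor matrix, β_i the firm's sampled loadings, and ε_i ~ Normal(0, scale_i);
# note the :var column is passed directly as the Normal scale (standard deviation) parameter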
##
df_rand = flatten(DataFrame(
firm_id = 1:count,
ret = Ref(main_mkt_mat) .* firm_mats .+ firm_errors
), :ret)
df_rand = @chain df_rand begin
groupby(:firm_id)
@transform(:date = df_mkt_benchmark.date)
select([:firm_id, :date, :ret])
end
##
CSV.write(joinpath("data", "mkt_ret.csv"), df_mkt_benchmark)
CSV.write(joinpath("data", "firm_ret.csv"), df_rand)
##
df_events = DataFrame(
firm_id = rand(1:count, 1_000_000),
event_date = rand(Date(1982):Day(1):Date(2019, 12), 1_000_000)
)
df_events = @chain df_events begin
@rtransform(
:event_window_start = advancebdays("USNYSE", :event_date, -10),
:event_window_end = advancebdays("USNYSE", :event_date, 10),
:est_window_start = :event_date - Year(1),
:est_window_end = advancebdays("USNYSE", :event_date, -11),
)
end
CSV.write(joinpath("data", "event_dates.csv"), df_events) | AbnormalReturns | https://github.com/junder873/AbnormalReturns.jl.git |
|
[
"MIT"
] | 0.3.0 | bd11a5492a2b50db534baec5826029e5f80a4445 | code | 1506 | using CSV, Dates, BenchmarkTools, InMemoryDatasets
using LinearAlgebra # for cholesky!/Symmetric used in simple_reg below
using Revise
using AbnormalReturns
##
ds_firm = CSV.File(joinpath("data", "firm_ret.csv")) |> Dataset
ds_mkt = CSV.File(joinpath("data", "mkt_ret.csv")) |> Dataset
ds_events = CSV.File(joinpath("data", "event_dates.csv")) |> Dataset |> unique
##
@time ds_all = innerjoin(
ds_firm,
ds_mkt,
on=[:date]
)
# 8.563892 seconds (4.58 k allocations: 7.422 GiB, 19.65% gc time, 0.05% compilation time)
##
@time ds_event_joined = innerjoin(
ds_all,
ds_events[:, [:firm_id, :event_date, :est_window_start, :est_window_end]],
on=[:firm_id => :firm_id, :date => (:est_window_start, :est_window_end)]
)
# 52.283242 seconds (11.54 k allocations: 23.178 GiB, 7.14% gc time)
##
@time groupby!(ds_event_joined, [:firm_id, :event_date])
# 45.565012 seconds (1.01 M allocations: 26.786 GiB, 16.40% gc time)
##
function simple_reg(xs...)
pred = disallowmissing(hcat(xs[2:end]...))
resp = disallowmissing(xs[1])
[cholesky!(Symmetric(pred' * pred)) \ (pred' * resp)]
end
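# simple_reg solves OLS via the normal equations, β = (X'X) \ (X'y); the result is
# wrapped in a one-element vector, presumably so combine stores one entry per group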
@time InMemoryDatasets.combine(ds_event_joined, (:ret, :mkt, :hml, :umd, :smb) => simple_reg)
# 15.090089 seconds (35.68 M allocations: 19.679 GiB, 56.77% gc time, 8.81% compilation time)
##
@time ds_event_joined = innerjoin(
ds_all,
ds_events[:, [:firm_id, :event_date, :event_window_start, :event_window_end]],
on=[:firm_id => :firm_id, :date => (:event_window_start, :event_window_end)]
)
##
@time groupby!(ds_event_joined, [:firm_id, :event_date]) | AbnormalReturns | https://github.com/junder873/AbnormalReturns.jl.git |
|
[
"MIT"
] | 0.3.0 | bd11a5492a2b50db534baec5826029e5f80a4445 | code | 4024 | # This file runs the benchmark using real world data (which includes missing data)
# The goal is partially to test that the functions work properly.
# The data used in this test is proprietary: it is downloaded from the WRDS database
# and consists of the CRSP daily file and the Fama-French factors file. The CRSP daily
# file goes back well before the test starts (the test starts in 2015; this version of the
# CRSP daily file goes back to 1986) and the Fama-French file goes back to 1926.
using CSV, DataFramesMeta, Dates
using Revise
using AbnormalReturns
df_crsp = CSV.File(joinpath("data", "crsp_entry.csv")) |> DataFrame
df_mkt = @chain CSV.File(joinpath("data", "ff_entry.csv")) begin
DataFrame
@rtransform(:mkt = :mktrf + :rf)
select([:date, :mkt, :mktrf, :smb, :hml, :umd, :rf])
end
df_res = CSV.File(joinpath("data", "results.csv")) |> DataFrame
mkt_data = MarketData(df_mkt, df_crsp)
##
df_test = @chain df_crsp begin
@rsubset(Date(2015) <= :date <= Date(2019))
@by(
:permno,
:date = minimum(:date):Day(1):maximum(:date) # redo to every date to include weekends
)
end
##
@time df_test = @chain df_test begin
@transform(:reg = quick_reg(mkt_data[:permno, :date-Year(1) .. :date-Day(1)], @formula(ret ~ mkt)))
@transform(
:bh_ret = bh_return(mkt_data[:permno, :date .. :date + Week(2), ["ret"]]),
:car_raw = car(mkt_data[:permno, :date .. :date + Week(2), ["ret", "mkt"]]),
:bhar_raw = bhar(mkt_data[:permno, :date .. :date + Week(2), ["ret", "mkt"]]),
:car_mm = car(mkt_data[:permno, :date .. :date + Week(2)], :reg),
:bhar_mm = bhar(mkt_data[:permno, :date .. :date + Week(2)], :reg),
:r2_mm = r2.(:reg),
:std_mm = std.(:reg)
)
select(Not(:reg))
@transform(:reg = quick_reg(mkt_data[:permno, :date-Year(1) .. :date-Day(1)], @formula(ret ~ mkt + smb + umd + hml)))
@transform(
:car_ffm = car(mkt_data[:permno, :date .. :date + Week(2)], :reg),
:bhar_ffm = bhar(mkt_data[:permno, :date .. :date + Week(2)], :reg),
:r2_ffm = r2.(:reg),
:std_ffm = std.(:reg)
)
select(Not(:reg))
@transform(:reg = quick_reg(mkt_data[:permno, :date-Year(1) .. :date-Day(1)], @formula(ret ~ 0 + mkt + smb + umd + hml)))
@transform(
:car_ffm2 = car(mkt_data[:permno, :date .. :date + Week(2)], :reg),
:bhar_ffm2 = bhar(mkt_data[:permno, :date .. :date + Week(2)], :reg),
:r2_ffm2 = r2.(:reg),
:std_ffm2 = std.(:reg)
)
select(Not(:reg))
end
# A little over 10 million observations
# First Run R7 5700X: 38.191368 seconds (200.79 M allocations: 55.747 GiB, 24.94% gc time, 215.70% compilation time: <1% of which was recompilation)
# Second Run: 35.790831 seconds (182.35 M allocations: 60.448 GiB, 37.53% gc time, 1.01% compilation time)
# changing sparsevec to vector in iteratetimelinetable reduced time by about 10 seconds
# dealing with missing values at the start or end of a range in the setup saves another few seconds
# changing the r[Not(missing_bdays)] to a custom function that creates the vector first saves around 30 seconds
# the main cost is the cases where there is a single missing value or a short run of them; getting rid of those saves over 30 seconds.
# This is largely because setting up the data there creates a noncontiguous view, which forces the entire vector
# to be materialized.
##
approx_or_missing(x::Missing, y::Missing) = true
approx_or_missing(x::Real, y::Real) = isapprox(x, y)
approx_or_missing(x, y) = false
df_res2 = @chain df_res begin
outerjoin(
_,
df_test,
on=[:permno, :date],
validate=(true, true),
makeunique=true
)
end
df_res2[!, :all_equal] .= true
for col in names(df_res)
println(col)
col ∈ ("permno", "date") && continue
col1 = col * "_1"
col1 ∉ names(df_res2) && continue # skip columns without a renamed counterpart from the join
df_res2[!, :all_equal] = df_res2.all_equal .* approx_or_missing.(df_res2[:, col], df_res2[:, col1])
end
@chain df_res2 begin
@rsubset(!:all_equal)
end
| AbnormalReturns | https://github.com/junder873/AbnormalReturns.jl.git |
|
[
"MIT"
] | 0.3.0 | bd11a5492a2b50db534baec5826029e5f80a4445 | code | 3163 | using DataFrames, CSV, DataFramesMeta, Dates, BenchmarkTools, SparseArrays, StaticArrays, LinearAlgebra
using Revise
using AbnormalReturns
##
df_firm = CSV.File(joinpath("data", "firm_ret.csv")) |> DataFrame
df_mkt = CSV.File(joinpath("data", "mkt_ret.csv")) |> DataFrame
df_events = CSV.File(joinpath("data", "event_dates.csv")) |> DataFrame
##
@time data = MarketData(df_mkt, df_firm; id_col=:firm_id, valuecols_firms=[:ret])
# First run R7 5700X: 8.356466 seconds (14.89 M allocations: 11.201 GiB, 3.74% gc time, 57.41% compilation time: <1% of which was recompilation)
# Second run R7 5700X: 4.575627 seconds (527.51 k allocations: 10.303 GiB, 19.86% gc time)
# First run i7 6700: 31.045712 seconds (35.03 M allocations: 13.222 GiB, 32.21% gc time, 64.39% compilation time)
# Second run i7 6700: 11.217124 seconds (537.50 k allocations: 11.431 GiB, 25.19% gc time)
##
df_firm = nothing
df_mkt = nothing
GC.gc()
##
@time df_temp = @chain df_events begin
@transform(:reg_mkt = quick_reg(data[:firm_id, :est_window_start .. :est_window_end], @formula(ret ~ mkt)),)
@transform(:reg_ffm = quick_reg(data[:firm_id, :est_window_start .. :est_window_end], @formula(ret ~ mkt + smb + hml + umd)),)
@transform(
:bhar_mkt = bhar(data[:firm_id, :est_window_start .. :est_window_end], :reg_mkt),
:bhar_ffm = bhar(data[:firm_id, :est_window_start .. :est_window_end], :reg_ffm),
)
@rtransform(
:var_mkt = var(:reg_mkt),
:var_ffm = var(:reg_ffm),
)
@transform(
:alpha = alpha(:reg_mkt),
:beta = beta(:reg_mkt),
)
end
# First run R7 5700X: 9.017057 seconds (30.39 M allocations: 3.184 GiB, 10.33% gc time, 85.86% compilation time)
# Second run R7 5700X: 1.453299 seconds (7.18 M allocations: 1.988 GiB, 13.25% gc time, 5.04% compilation time)
# First run i7 6700: 28.656179 seconds (33.49 M allocations: 3.360 GiB, 53.20% gc time, 90.52% compilation time)
# Second run i7 6700: 3.095861 seconds (7.13 M allocations: 1.985 GiB, 2.87% compilation time)
##
@time @chain df_events begin # parse formula every time and perform all checks each time
@rtransform(:reg = quick_reg(data[:firm_id, :est_window_start .. :est_window_end], @formula(ret ~ mkt + smb + hml + umd)),)
end
# Run R7 5700X: 26.472138 seconds (360.50 M allocations: 25.194 GiB, 11.69% gc time, 2.24% compilation time)
# i7 6700: 231.401211 seconds (363.87 M allocations: 25.336 GiB, 83.97% gc time, 0.04% compilation time)
##
@time @chain df_events begin # full multithread
@transform(:reg = quick_reg(data[:firm_id, :est_window_start .. :est_window_end], @formula(ret ~ mkt + smb + hml + umd)),)
end
# Run R7 5700X: 0.437344 seconds (2.07 M allocations: 637.053 MiB, 5.50% compilation time)
##
@time @chain df_events begin # not multithreaded
@transform(:reg = quick_reg.(data[:firm_id, :est_window_start .. :est_window_end, @formula(ret ~ mkt + smb + hml + umd)], Ref(@formula(ret ~ mkt + smb + hml + umd))),)
end
# Run R7 5700X: 2.890529 seconds (4.91 M allocations: 795.016 MiB, 5.99% gc time, 6.97% compilation time)
# i7 6700: 4.215225 seconds (4.08 M allocations: 752.040 MiB, 1.12% compilation time)
| AbnormalReturns | https://github.com/junder873/AbnormalReturns.jl.git |
|
[
"MIT"
] | 0.3.0 | bd11a5492a2b50db534baec5826029e5f80a4445 | code | 346 | using AbnormalReturns
using Documenter
Documenter.makedocs(
modules = [AbnormalReturns],
sitename = "AbnormalReturns.jl",
pages = [
"Introduction" => "index.md",
"Example" => "example.md",
"API" => "api.md"
]
)
deploydocs(
repo = "github.com/junder873/AbnormalReturns.jl.git",
target = "build",
) | AbnormalReturns | https://github.com/junder873/AbnormalReturns.jl.git |
|
[
"MIT"
] | 0.3.0 | bd11a5492a2b50db534baec5826029e5f80a4445 | code | 1232 | module AbnormalReturns
using LinearAlgebra
using StatsBase
using Reexport
using Statistics
using Dates
using DataFrames
using Tables
using IntervalSets: ClosedInterval, (..)
using SparseArrays
@reexport using BusinessDays
@reexport using StatsModels
using StaticArrays
using OffsetArrays
##############################################################################
##
## Exported methods and types
##
##############################################################################
# types and functions for fast CAR calculations
export MarketData, FixedTable, car, alpha, beta,
BasicReg, quick_reg, IterateFixedTable,
bh_return, bhar, MarketCalendar
export getindex, names
# From Statistics
export var, std
# From StatsBase
export coef, coefnames, responsename, nobs, dof_residual,
r2, adjr2, islinear, deviance, rss, predict
export ClosedInterval, ..
##############################################################################
##
## Load files
##
##############################################################################
include("marketCalendar.jl")
include("timelineData.jl")
include("calcUtils.jl")
include("iterateTimelineTable.jl")
include("fastRegression.jl")
include("calcFunctions.jl")
end | AbnormalReturns | https://github.com/junder873/AbnormalReturns.jl.git |
|
[
"MIT"
] | 0.3.0 | bd11a5492a2b50db534baec5826029e5f80a4445 | code | 10157 |
function bh_return(vals::AbstractVector{Float64})
out = 1.0
@simd for x in vals
out *= (1 + x)
end
out - 1
end
function bh_return(vals::AbstractVector{Union{Missing, Float64}})
out = 1.0
@simd for x in vals
if ismissing(x)
out *= 1
else
out *= (1+x)
end
end
out - 1
end
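# Worked example (hypothetical returns): buy-and-hold compounds each period, so for
# returns of 1%, -2%, and 3%:
#   bh_return([0.01, -0.02, 0.03]) == 1.01 * 0.98 * 1.03 - 1 ≈ 0.0195
# and a missing return compounds as a factor of 1 (i.e., a zero return):
#   bh_return([0.01, missing, 0.03]) == 1.01 * 1.03 - 1 ≈ 0.0403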
function bh_return(pred::FixedTable{N, T}, coef::SVector{N, T}) where {N, T}
#@assert size(pred, 2) == length(coef) "Got Matrix of size $(size(pred)) and coefficients of $coef $pred"
out = one(T)
@simd for i in 1:size(pred)[1]
out *= (point_pred(pred, coef, i) + 1)
end
out - 1
end
bhar(resp, pred, coef) = bh_return(resp) - bh_return(pred, coef)
bhar(x::AbstractVector, y::AbstractVector) = bh_return(x) - bh_return(y)
function cumulative_return(pred::FixedTable{N, T}, coef::SVector{N, T}) where {N, T}
#@assert size(pred, 2) == length(coef) "Got Matrix of size $(size(pred)) and coefficients of $coef $pred"
out = zero(T)
@simd for i in 1:size(pred)[1]
out += point_pred(pred, coef, i)
end
out
end
car(resp, pred, coef) = sum(resp) - cumulative_return(pred, coef)
car(x::AbstractVector, y::AbstractVector) = sum(skipmissing(x)) - sum(skipmissing(y))
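# Worked example (hypothetical returns): with firm returns x and benchmark returns y,
#   car([0.01, 0.02], [0.005, 0.005])  == 0.03 - 0.01 == 0.02
#   bhar([0.01, 0.02], [0.005, 0.005]) == (1.01 * 1.02 - 1) - (1.005^2 - 1) ≈ 0.0202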
var_diff(x, y) = var(x) + var(y) - 2 * cov(x, y)
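# variance of the difference of two series: Var(x - y) = Var(x) + Var(y) - 2 * Cov(x, y)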
# for firm data
"""
Calculates the buy-and-hold return (also called the geometric return).
These functions treat missing returns in the period implicitly as a zero return.
"""
function bh_return(data::FixedTable{1}; minobs=0.8)
d = data[:, 1]
if length(d) < adjust_minobs(minobs, data)
missing
else
bh_return(d)
end
end
function bh_return(
data::IterateFixedTable{T, 1};
minobs=0.8
) where {T}
out = Vector{Union{Missing, Float64}}(missing, length(data))
Threads.@threads for i in 1:length(data)
out[i] = bh_return(data[i]; minobs)
end
if any(ismissing.(out))
out
else
disallowmissing(out)
end
end
function sum_return(data::FixedTable{1}; minobs=0.8)
d = data[:, 1]
if length(d) < adjust_minobs(minobs, data)
missing
else
sum(d)
end
end
function simple_diff(
data::FixedTable{2},
fun;
minobs=0.8
)
x = data[:, 1]
y = data[:, 2]
if length(x) < adjust_minobs(minobs, data)
missing
else
fun(x, y)
end
end
function pred_diff(
data::FixedTable{N1},
rr::RegressionModel,
fun;
minobs=0.8
) where {N1}
#@assert N1 == N2 + 1 "Dimensions are mismatched"
if !isdefined(rr, :coefnames)
return missing
end
if size(data, 1) < adjust_minobs(minobs, data)
return missing
else
fun(data[:, 1], pred_matrix(data), coef(rr))
end
end
function simple_diff(
data::IterateFixedTable{T, 2},
fun;
minobs=0.8
) where {T}
out = Vector{Union{Missing, Float64}}(missing, length(data))
Threads.@threads for i in 1:length(data)
out[i] = simple_diff(data[i], fun; minobs)
end
if any(ismissing.(out))
out
else
disallowmissing(out)
end
end
function pred_diff(
data::IterateFixedTable{T, N1},
rrs::AbstractVector{BasicReg{L, R, N2}},
fun;
minobs=0.8
) where {L, R, N1, N2, T}
@assert N1 == N2 + 1 "Dimensions are mismatched"
@assert length(data) == length(rrs) "Vectors are not the same length"
out = Vector{Union{Missing, Float64}}(missing, length(data))
Threads.@threads for i in 1:length(data)
out[i] = pred_diff(data[i], rrs[i], fun; minobs)
end
if any(ismissing.(out))
out
else
disallowmissing(out)
end
end
function pred_diff(
data::Tuple,
rr,
args...;
vargs...
)
f = if isa(rr, AbstractVector)
first(rr).formula
else
rr.formula
end
pred_diff(data[1][data[2:end]..., f, check_intercept=false], rr, args...; vargs...)
end
"""
bhar(
data::FixedTable{T, 2};
minobs=0.8
) where {T}
bhar(
data::FixedTable,
rr::RegressionModel;
minobs=0.8
)
bhar(
data::IterateFixedTable{T, 2};
minobs=0.8
) where {T}
bhar(
data::IterateFixedTable,
rrs::AbstractVector{<:BasicReg};
minobs=0.8
)
Calculates the difference between buy and hold returns (also referred to as geometric returns) for a firm and a benchmark.
If a regression is passed, then the benchmark is based on the coefficients from that regression and the performance of the benchmarks
in the regression. These are sometimes called Fama-French abnormal returns. If no regression is passed,
abnormal returns are calculated as the difference between the first and second columns in
the FixedTable (second column is typically the benchmark such as the S&P 500 or a value weighted return of all firms).
As with estimating the regression, passing an `IterateFixedTable` returns a Vector and uses a more optimized, multithreaded method.
"""
bhar(data::Union{IterateFixedTable, FixedTable}; minobs=0.8) = simple_diff(data, bhar; minobs)
bhar(data::Union{IterateFixedTable, FixedTable, Tuple}, rr; minobs=0.8) = pred_diff(data, rr, bhar; minobs)
"""
car(
data::FixedTable{T, 2};
minobs=0.8
) where {T}
car(
data::FixedTable,
rr::RegressionModel;
minobs=0.8
)
car(
data::IterateFixedTable{T, 2};
minobs=0.8
) where {T}
car(
data::IterateFixedTable,
rrs::AbstractVector{<:BasicReg};
minobs=0.8
)
Calculates the cumulative returns of a firm over a benchmark (through addition of each return).
If a regression is passed, then the benchmark is based on the coefficients from that regression and the performance of the benchmarks
in the regression. These are sometimes called Fama-French abnormal returns. If no regression is passed,
abnormal returns are calculated as the difference between the first and second columns in
the FixedTable (second column is typically the benchmark such as the S&P 500 or a value weighted return of all firms).
As with estimating the regression, passing an `IterateFixedTable` returns a Vector and uses a more optimized, multithreaded method.
"""
car(data::Union{IterateFixedTable, FixedTable}; minobs=0.8) = simple_diff(data, car; minobs)
car(data::Union{IterateFixedTable, FixedTable, Tuple}, rr; minobs=0.8) = pred_diff(data, rr, car; minobs)
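# Minimal usage sketch (mirrors the benchmark scripts; `mkt_data::MarketData`, the id value
# `permno`, the date `dt`, and the fitted regression `rr` are assumptions, not fixed names):
#   car(mkt_data[permno, dt .. dt + Week(2), ["ret", "mkt"]])  # firm return minus the benchmark column
#   car(mkt_data[permno, dt .. dt + Week(2)], rr)              # benchmark predicted from regression `rr`
# `bhar` takes the same arguments but compounds returns instead of summing them.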
"""
var[std](rr::Union{AbstractVector{<:RegressionModel}, RegressionModel})
var[std](data::Union{IterateFixedTable, FixedTable}; minobs=0.8)
If a regression model is passed, then this calculates the variance (standard deviation)
based on the residual sum of squares divided by the degrees of freedom. A vector of
RegressionModel will return the same length of vector results.
If a FixedTable is passed (or an IterateFixedTable), and that contains only one column,
then the variance (standard deviation) is calculated for that column. If it has two
columns, then the calculation is based on the difference between the columns.
"""
Statistics.var(data::Union{IterateFixedTable{T, 2}, FixedTable{2}}; minobs=0.8) where {T} = simple_diff(data, var_diff; minobs)
function Statistics.var(data::FixedTable{1}; minobs=0.8)
d = data[:, 1]
if length(d) < adjust_minobs(minobs, data)
missing
else
var(d)
end
end
Statistics.var(data::IterateFixedTable{T, 1}; minobs=0.8) where {T} = var.(data; minobs)
Statistics.std(data::FixedTable; minobs=0.8) = sqrt(var(data; minobs))
Statistics.std(data::IterateFixedTable; minobs=0.8) = sqrt.(var(data; minobs))
Statistics.var(rr::RegressionModel) = rss(rr) / dof_residual(rr)
Statistics.std(rr::RegressionModel) = sqrt(var(rr))
Statistics.var(rrs::AbstractVector{<:RegressionModel}) = var.(rrs)
Statistics.std(rrs::AbstractVector{<:RegressionModel}) = sqrt.(var(rrs))
function get_coefficient_pos(rr::RegressionModel, coefname::String...)
for x in coefname
if x ∈ coefnames(rr)
return findfirst(x .== coefnames(rr))
end
end
@error("None of $(coefname) is in the RegressionModel model.")
end
function get_coefficient_val(rr::RegressionModel, coefname::String...)
ismissing(coefnames(rr)) && return missing
coef(rr)[get_coefficient_pos(rr, coefname...)]
end
# as an optimization, if all are the same regression, then just find the coefname once
function get_coefficient_val(rrs::Vector{<:RegressionModel}, coefname::String...)
out = Vector{Union{Missing, Float64}}(missing, length(rrs))
pos = 0
for (i, rr) in enumerate(rrs)
if pos == 0 && !ismissing(coefnames(rr))
pos = get_coefficient_pos(rr, coefname...)
end
if pos != 0 && !ismissing(coefnames(rr))
out[i] = coef(rr)[pos]
end
end
out
end
"""
alpha(rr::RegressionModel, coefname::String...="intercept")
"alpha" in respect to the the CAPM model, i.e., the intercept in the model.
This is the alpha from the estimation period.
This function finds the position of the coefficient name provided, defaults to "intercept".
If the coefname is not in the regression, then this function returns an error.
"""
alpha(rr::RegressionModel, coefname::String...="(Intercept)") = get_coefficient_val(rr, coefname...)
alpha(rrs::Vector{<:RegressionModel}, coefname::String...="(Intercept)") = get_coefficient_val(rrs, coefname...)
"""
beta(rr::RegressionModel, coefname::String...=("mkt", "mktrf", "vwretd", "ewretd")...)
"beta" with respect to the CAPM model, i.e., the coefficient on the market return minus the risk-free rate.
This is the beta from the estimation period.
This function finds the position of the coefficient name provided, which defaults to several common market return names.
If the coefname is not in the regression, then this function returns an error.
"""
beta(rr::RegressionModel, coefname::String...=("mkt", "mktrf", "vwretd", "ewretd")...) = get_coefficient_val(rr, coefname...)
beta(rrs::Vector{<:RegressionModel}, coefname::String...=("mkt", "mktrf", "vwretd", "ewretd")...) = get_coefficient_val(rrs, coefname...) | AbnormalReturns | https://github.com/junder873/AbnormalReturns.jl.git |
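# Usage sketch: given a fitted market-model regression `rr` (e.g., from
# `quick_reg(..., @formula(ret ~ mkt))`), `alpha(rr)` returns the coefficient named
# "(Intercept)" and `beta(rr)` the first match among "mkt", "mktrf", "vwretd", "ewretd";
# a different coefficient can be looked up by name, e.g. `beta(rr, "smb")`.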
|
[
"MIT"
] | 0.3.0 | bd11a5492a2b50db534baec5826029e5f80a4445 | code | 3027 |
# These are created to minimize the amount of allocations Julia does
# Julia typically allocates a vector for each loop, which when using
# so many loops, can create real garbage collection problems
# As it turns out, doing sum(abs2, resp - mean(resp)) also does
# an allocation, which could mean allocating a huge amount
# caluclating rss was even worse, so these functions are only
# meant to be used internally but do not allocate if passed a view
function calc_tss(resp::AbstractVector)
out = 0.0
m = mean(resp)
@simd for x in resp
out += (x - m) ^ 2
end
out
end
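# equivalent to sum(abs2, resp .- mean(resp)), but without allocating the intermediate vector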
calc_tss(resp::FixedTable{1}) = calc_tss(resp[:, 1])
function point_pred(pred::FixedTable{N}, coef::SVector{N}, i::Int) where {N}
out = 0.0
@simd for j in 1:N
@inbounds out += pred[i, j] * coef[j]
end
out
end
function calc_rss(resp::AbstractVector, pred::FixedTable{N}, coef::SVector{N}) where {N}
@assert length(resp) == size(pred)[1] "Response is not same length as prediction matrix"
out = 0.0
@simd for i in 1:length(resp)
@inbounds out += (resp[i] - point_pred(pred, coef, i)) ^ 2
end
out
end
calc_rss(resp::FixedTable{1}, pred::FixedTable{N}, coef::SVector{N}) where {N} = calc_rss(resp[:, 1], pred, coef)
function mult_add(x::AbstractVector{T}, y::AbstractVector{T}) where {T}
@assert length(x) == length(y) "Vectors are not the same length"
out = zero(T)
@simd for i in eachindex(x, y)
@inbounds out += x[i] * y[i]
end
out
end
function mult_square(x::FixedTable{N, T}) where {N, T}
out = MMatrix{N, N, T} |> zeros
for i in 1:N
for j in 1:i
out[i, j] = mult_add(
x[:, i],
x[:, j]
)
if j != i
out[j, i] = out[i, j]
end
end
end
SMatrix(out)
end
function Base.:(*)(x::Adjoint{T, <:FixedTable{N1, T}}, y::FixedTable{N2, T}) where {T, N1, N2}
if x.parent === y
return mult_square(y)
end
out = MMatrix{N1, N2, T} |> zeros
for i in 1:N1
for j in 1:N2
out[i, j] = mult_add(x.parent[:, i], y[:, j])
end
end
SMatrix(out)
end
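# the x.parent === y check above routes X' * X on the same FixedTable to the symmetric
# fast path (mult_square), which computes only the lower triangle and mirrors it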
function Base.:(*)(x::Adjoint{T, <:FixedTable{N, T}}, y::FixedTable{1, T}) where {T, N}
out = MVector{N, T} |> zeros
for i in 1:N
out[i] = mult_add(x.parent[:, i], y[:, 1])
end
SVector(out)
end
function Base.:(*)(x::Adjoint{T, <:FixedTable{N, T}}, y::AbstractVector{T}) where {T, N}
out = MVector{N, T} |> zeros
for i in 1:N
out[i] = mult_add(x.parent[:, i], y)
end
SVector(out)
end
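# build the predictor table by dropping the response (first) column of `data`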
function pred_matrix(data::FixedTable{N, T, AV}) where {N, T, AV}
#@assert N >= 2 "Not enough columns"
FixedTable(
SVector{N-1, AV}(data[:, i] for i in 2:N),
SVector{N-1}(names(data)[i] for i in 2:N)
)
end
adjust_minobs(x::Integer, ::FixedTable) = x
function adjust_minobs(x::Real, data::FixedTable)
if x < 1
data.req_length * x
else
x
end
end | AbnormalReturns | https://github.com/junder873/AbnormalReturns.jl.git |
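# e.g., with a 250-business-day estimation window (req_length == 250), the default
# minobs of 0.8 requires at least 200 non-missing observations, while an integer
# minobs is treated as an absolute count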
|
[
"MIT"
] | 0.3.0 | bd11a5492a2b50db534baec5826029e5f80a4445 | code | 6821 |
struct BasicReg{L, R, N} <: RegressionModel
nobs::Int
formula::FormulaTerm{L,R}
coef::SVector{N, Float64}
coefnames::SVector{N, String}
yname::String
tss::Float64
rss::Float64
residuals::Union{Vector{Float64}, Nothing}
function BasicReg(nobs, formula::FormulaTerm{L,R}, coef::SVector{N}, coefnames::SVector{N}, yname, tss, rss, residuals) where {L,R,N}
# @assert rss >= 0 "Residual sum of squares must be greater than 0"
# @assert tss >= 0 "Total sum of squares must be greater than 0"
# @assert nobs >= 0 "Observations must be greater than 0"
# @assert length(coef) == length(coefnames) "Number of coefficients must be same as number of coefficient names"
new{L,R,N}(nobs, formula, coef, coefnames, yname, tss, rss, residuals)
end
BasicReg{N}(x::Int, f::FormulaTerm{L,R}) where {L, R, N} = new{L,R,N}(x, f)
end
"""
function BasicReg(
resp::AbstractVector,
pred::AbstractMatrix,
yname::String,
xnames::SVector{N, String},
f::FormulaTerm{L,R};
save_residuals::Bool=false,
minobs=1
)::BasicReg{L,R,N} where {L,R,N}
## Arguments
- resp::AbstractVector{Float64}: The "Y" or response in a linear regression
- pred::AbstractMatrix{Float64}: The "X" matrix in a linear regression
- yname::String: The name of the response variable
- xnames::SVector{N, String}: The names of the prediction variables
- f::FormulaTerm{L,R}: A StatsModels.jl formula, saved in the resulting struct
- save_residuals::Bool=false: Whether or not to save the vector of residuals from
the regression. Note for large numbers of regressions this can significantly slow
down the speed
- minobs::Int=1: The minimum length of the response vector for the regression to
run. The regression will also not run if the length of the response vector is
less than or equal to the number of columns in the prediction matrix.
BasicReg is an intentionally simplistic linear regression. It also attempts to produce
a minimum number of allocations if views of vectors are passed.
"""
function BasicReg(
resp::AbstractVector,
pred::AbstractMatrix,
yname::String,
xnames::SVector{N, String},
f::FormulaTerm{L, R};
save_residuals::Bool=false,
minobs=1
) where {L, R, N}
if length(resp) <= size(pred)[2] || length(resp) < minobs
return BasicReg{N}(length(resp), f)
end
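    # solve the normal equations, coef = (X'X) \ (X'y), via a Cholesky factorization of
    # the Gram matrix rather than forming an explicit inverse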
coef = cholesky(pred' * pred) \ (pred' * resp)
BasicReg(
length(resp),
f,
SVector{N}(coef),
xnames,
yname,
calc_tss(resp),
calc_rss(resp, pred, coef),
save_residuals ? resp - pred * coef : nothing
)
end
function BasicReg(
tab::FixedTable{N},
f::FormulaTerm;
vargs...
) where {N}
pred = pred_matrix(tab)
BasicReg(tab[:, 1], pred, tab.cols[1], pred.cols, f; vargs...)
end
function BasicReg(
tab::FixedTable{N},
args...;
vargs...
) where {N}
BasicReg(tab[:, 1], pred_matrix(tab), args...; vargs...)
end
"""
quick_reg(
data::FixedTable,
f::FormulaTerm;
minobs::Real=0.8,
save_residuals::Bool=false
)
quick_reg(
data::IterateFixedTable,
f::FormulaTerm;
minobs::Real=0.8,
save_residuals::Bool=false
)
Calculates a linear regression for the supplied data based on the formula (formula from StatsModels.jl).
Unless the formula explicitly excludes the intercept (i.e., `@formula(y ~ 0 + x)`), an intercept is added.
If `data` is of the type `IterateFixedTable`, then the function uses the maximum number of threads
on each `FixedTable` in an optimized way and returns a `Vector{BasicReg}`.
## Arguments
- `minobs::Real`: The minimum number of observations to return a completed regression. If less than 1,
the value is used as a percentage relative to the total number of business days in the time period.
Therefore, the default of 0.8 corresponds to at least 80% of the business days over the time period have values.
- `save_residuals::Bool=false`: Whether to save the residuals into `BasicReg`, This can have significant performance implications.
"""
function quick_reg(
data::IterateFixedTable{T, N},
f::FormulaTerm{L, R};
minobs=0.8,
save_residuals::Bool=false
) where {T, N, L, R}
out = Vector{BasicReg{L, R, N-1}}(undef, length(data))
Threads.@threads for i in 1:length(data)
x = data[i]
out[i] = BasicReg(x, f; minobs=adjust_minobs(minobs, x), save_residuals)
end
out
end
function quick_reg(
data::Tuple,
f::FormulaTerm{L, R};
minobs=0.8,
save_residuals::Bool=false,
check_intercept=true
) where {L, R}
f = adjust_formula(f; check_intercept)
final_data = data[1][data[2:end]..., f, check_intercept=false]
quick_reg(final_data, f; minobs, save_residuals)
end
function quick_reg(
data::FixedTable,
f::FormulaTerm;
minobs=0.8,
save_residuals::Bool=false
)
BasicReg(data, f; minobs=adjust_minobs(minobs, data), save_residuals)
end
StatsBase.predict(mod::BasicReg{L, R, N}, x::FixedTable{N}) where {L, R, N} = x * coef(mod)
StatsBase.predict(mod::BasicReg, x) = x * coef(mod)
StatsBase.coef(x::BasicReg) = isdefined(x, :coefnames) ? x.coef : missing
StatsBase.coefnames(x::BasicReg) = isdefined(x, :coefnames) ? x.coefnames : missing
StatsBase.responsename(x::BasicReg) = isdefined(x, :coefnames) ? x.yname : missing
StatsBase.nobs(x::BasicReg) = x.nobs
StatsBase.dof_residual(x::BasicReg{L, R, N}) where {L, R, N} = isdefined(x, :coefnames) ? nobs(x) - N : missing
StatsBase.r2(x::BasicReg) = 1 - (rss(x) / deviance(x))
StatsBase.adjr2(x::BasicReg) = 1 - rss(x) / deviance(x) * (nobs(x) - 1) / dof_residual(x)
StatsBase.islinear(x::BasicReg) = true
StatsBase.deviance(x::BasicReg) = isdefined(x, :coefnames) ? x.tss : missing
StatsBase.rss(x::BasicReg) = isdefined(x, :coefnames) ? x.rss : missing
function StatsBase.residuals(x::BasicReg)
if !isdefined(x, :coefnames)
return missing
end
if x.residuals === nothing
@error("To access residuals, run `quick_reg` with the option `save_residuals=true`")
else
x.residuals
end
end
function rhs_str(nms, vals; intercept = "(Intercept)")
out = String[]
for (nm, val) in zip(nms, vals)
val_str = string(round(val, digits=3))
if nm == intercept
push!(out, val_str)
else
push!(out, val_str * "*" * nm)
end
end
join(out, " + ")
end
function Base.show(io::IO, rr::BasicReg)
if !isdefined(rr, :coefnames)
print(io, "Obs: $(nobs(rr)), $(rr.formula)")
else
print(io, "Obs: $(nobs(rr)), $(responsename(rr)) ~ $(rhs_str(coefnames(rr), coef(rr))), AdjR2: ", round(adjr2(rr) * 100, digits=3), "%")
end
end
struct IterateFixedTable{T, N, CL<:Union{Symbol, String}, COL<:Union{MatrixTerm, SVector{N, Symbol}}}
parent::MarketData{T}
col_names::SVector{N, CL}
cols::COL
key_vec::Vector{T}
ranges::Vector{UnitRange{Int}}
missing_vecs::Dict{T, Union{Nothing, OffsetVector{Bool, Vector{Bool}}}}
req_lengths::Vector{Int}
function IterateFixedTable(
data::MarketData{T},
col_names::SVector{N, CL},
cols::COL,
key_vec,
ranges,
missing_vecs,
req_lengths=fill(0, length(key_vec))
) where {T, N, CL, COL}
@assert Set(key_vec) ⊆ Set(keys(missing_vecs)) "Some Keys are Missing"
@assert Set(key_vec) ⊆ Set(keys(data.firmdata)) "Some Keys are not in the Parent Data"
@assert length(key_vec) == length(ranges) == length(req_lengths) "Vectors must be same length"
new{T, N, CL, COL}(data, col_names, cols, key_vec, ranges, missing_vecs, req_lengths)
end
end
parent(data::IterateFixedTable) = data.parent
iter_id(data::IterateFixedTable) = data.key_vec
iter_range(data::IterateFixedTable) = data.ranges
iter_missings(data::IterateFixedTable) = data.missing_vecs
iter_cols(data::IterateFixedTable) = data.cols
iter_col_names(data::IterateFixedTable) = data.col_names
iter_req_lengths(data::IterateFixedTable) = data.req_lengths
function Base.iterate(iter::IterateFixedTable{T}, state=1) where {T}
if state > length(iter)
return nothing
end
(iter[state], state+1)
end
# function Base.eltype(::IterateFixedTable{T, MNames, FNames, N1, N2}) where {T, MNames, FNames, N1, N2}
# Tuple{T, Vector{IterateOutput}}
# end
function Base.length(iter::IterateFixedTable)
length(iter.key_vec)
end
function Base.getindex(data::IterateFixedTable, i::Int)
#1 <= i <= length(data) || throw(BoundsError(data, i))
id = iter_id(data)[i]
r = iter_range(data)[i]
mssngs = iter_missings(data)[id]
if mssngs !== nothing
temp = view(mssngs, r)
if any(temp)
mssngs = mssngs[r]
else
mssngs = nothing
end
end
parent(data)[id, r, iter_cols(data), mssngs, col_names=iter_col_names(data), req_length=iter_req_lengths(data)[i]]
end
Base.firstindex(data::IterateFixedTable) = 1
Base.lastindex(data::IterateFixedTable) = length(data)
function joined_missings(data, id, range_limits, cols)
r = range_limits[id]
out = combine_missing_bdays(get_missing_bdays.(Ref(data), id, Ref(r), cols)...)
if out !== nothing
OffsetVector(Vector(out), first(r)-1)
else
nothing
end
end
function Base.getindex(
data::MarketData{T},
ids::Vector{T},
dates::ClosedInterval{Vector{Date}},
cols
) where {T}
@assert length(ids) == length(dates.left) == length(dates.right) "Vectors are not the same length"
u_ids = unique(ids)
rs = date_range(calendar(data), dates)
r_lengths = length.(rs)
range_limits = Dict(
u_ids .=> [
maximin(interval.(Ref(data), id, cols)...) for id in u_ids
]
)
mssngs = Dict(u_ids .=> joined_missings.(Ref(data), u_ids, Ref(range_limits), Ref(cols)))
Threads.@threads for i in eachindex(ids, rs)
@inbounds rs[i] = maximin(rs[i], range_limits[ids[i]])
# Trim missing values from the start and end of each range
x = mssngs[ids[i]]
if x !== nothing
v = view(x, rs[i])
if length(v) > 0 && first(v)
j = findfirst(!, v)
if j === nothing
@inbounds rs[i] = rs[i][end+1:end]
else
@inbounds rs[i] = rs[i][j-1:end]
end
end
if length(v) > 0 && last(v)
j = findlast(!, v)
if j === nothing
@inbounds rs[i] = rs[i][1:0]
else
@inbounds rs[i] = rs[i][1:j-1]
end
end
end
end
if eltype(cols) <: AbstractTerm
col_names = SVector{length(cols)}(coefnames(cols))
final_cols = MatrixTerm(cols)
else
col_names = SVector{length(cols)}(cols)
final_cols = SVector{length(cols)}(Symbol.(cols))
end
IterateFixedTable(
data,
col_names,
final_cols,
ids,
rs,
mssngs,
r_lengths
)
end
function Base.getindex(
data::MarketData{T},
ids::Vector{T},
dates::ClosedInterval{Vector{Date}},
f::FormulaTerm;
check_intercept=true
) where {T}
f = adjust_formula(f; check_intercept)
sch = apply_schema(f, schema(f, data))
out = (sch.lhs, sch.rhs.terms...)
data[ids, dates, out]
end
function Base.show(io::IO, data::IterateFixedTable)
println(io, "Iterable set of FixedTable with $(length(data)) unique datapoints")
show(io, parent(data))
end
function StatsModels.schema(f::FormulaTerm, d::MarketData)
StatsModels.Schema(
Dict(
term.(StatsModels.termvars(f)) .=> ContinuousTerm.(StatsModels.termvars(f), 0.0, 0.0, 0.0, 0.0)
)
)
end
function StatsModels.schema(f::FormulaTerm, d::FixedTable)
StatsModels.Schema(
Dict(
term.(StatsModels.termvars(f)) .=> ContinuousTerm.(StatsModels.termvars(f), 0.0, 0.0, 0.0, 0.0)
)
)
end
function Base.getindex(
data::MarketData{T},
ids::Vector{T},
dates::ClosedInterval{Vector{Date}},
) where {T}
(data, ids, dates)
end
# This is nearly an exact copy of the `GenericHolidayCalendar` that BusinessDays.jl
# creates, except that it allows business days to fall on a weekend and, instead
# of a list of holidays, it stores a list of business days.
"""
MarketCalendar
* `bdays`: a vector of business days
* `dtmin`: minimum date allowed to check for bdays in bdays set. Defaults to `min(bdays...)`.
* `dtmax`: maximum date allowed to check for bdays in bdays set. Defaults to `max(bdays...)`.
* `isbday_array`: cached `Bool` for each day between `dtmin` and `dtmax` indicating whether it is a business day.
* `bdayscounter_array`: cumulative count of business days up to each day between `dtmin` and `dtmax`.
"""
struct MarketCalendar <: BusinessDays.HolidayCalendar
bdays::Vector{Date}
dtmin::Date
dtmax::Date
isbday_array::Vector{Bool}
bdayscounter_array::Vector{UInt32}
end
Base.:(==)(g1::MarketCalendar, g2::MarketCalendar) = g1.bdays == g2.bdays && g1.dtmin == g2.dtmin && g1.dtmax == g2.dtmax
Base.hash(g::MarketCalendar, h::UInt) = hash(g.bdays, hash(g.dtmin, hash(g.dtmax, h)))
"""
MarketCalendar(bdays, [dtmin], [dtmax])
* `bdays`: a vector of dates
* `dtmin`: minimum date allowed to check for bdays in bdays set. Defaults to `min(bdays...)`.
* `dtmax`: maximum date allowed to check for bdays in bdays set. Defaults to `max(bdays...)`.
"""
function MarketCalendar(bdays::Vector{Date}, dtmin::Date=minimum(bdays), dtmax::Date=maximum(bdays))
bdays = sort(bdays)
isbday_array = zeros(Bool, Dates.value(dtmax - dtmin)+1)
bdayscounter_array = zeros(UInt32, length(isbday_array))
bdays_idx = 1
for (i, d) in enumerate(dtmin:Day(1):dtmax)
if d == bdays[bdays_idx]
isbday_array[i] = true
bdays_idx += 1
end
if i > 1
bdayscounter_array[i] = bdayscounter_array[i-1] + isbday_array[i]
end
end
market_calendar = MarketCalendar(bdays, dtmin, dtmax, isbday_array, bdayscounter_array)
return market_calendar
end
function BusinessDays.checkbounds(cal::MarketCalendar, dt::Date)
if dt < cal_dt_min(cal) || dt > cal_dt_max(cal)
throw(AssertionError("Date out of calendar bounds: $dt. Allowed dates interval is from $(cal.dtmin) to $(cal.dtmax)."))
end
end
@inline BusinessDays._linenumber(cal::MarketCalendar, dt::Date) = Dates.days(dt) - Dates.days(cal.dtmin) + 1
function BusinessDays.isholiday(cal::MarketCalendar, dt::Date)
!isbday(cal, dt)
end
function BusinessDays.isbday(hc::MarketCalendar, dt::Date)::Bool
BusinessDays.checkbounds(hc, dt)
hc.isbday_array[BusinessDays._linenumber(hc, dt)]
end
function BusinessDays.bdayscount(hc::MarketCalendar, dt0::Date, dt1::Date)::Int
dt0 = tobday(hc, dt0)
dt1 = tobday(hc, dt1)
Int(hc.bdayscounter_array[BusinessDays._linenumber(hc, dt1)]) - Int(hc.bdayscounter_array[BusinessDays._linenumber(hc, dt0)])
end
function BusinessDays.listbdays(hc::MarketCalendar, dt0::Date, dt1::Date)
BusinessDays.checkbounds(hc, dt0)
BusinessDays.checkbounds(hc, dt1)
i1 = findfirst(x -> x >= dt0, hc.bdays)
i2 = findlast(x -> x <= dt1, hc.bdays)
hc.bdays[i1:i2]
end
function date_range(hc::MarketCalendar, dates::ClosedInterval{Date})
date_pos(hc, dates.left, true):date_pos(hc, dates.right)
end
function date_range(hc::MarketCalendar, dates::ClosedInterval{Vector{Date}})
@assert length(dates.left) == length(dates.right) "Date sets are not equal length"
BusinessDays.checkbounds(hc, minimum(dates.left))
BusinessDays.checkbounds(hc, maximum(dates.left))
BusinessDays.checkbounds(hc, minimum(dates.right))
BusinessDays.checkbounds(hc, maximum(dates.right))
out = Vector{UnitRange{Int}}(undef, length(dates.left))
Threads.@threads for i in eachindex(dates.left, dates.right)
@inbounds out[i] = date_pos(hc, dates.left[i], true; perform_check=false):date_pos(hc, dates.right[i]; perform_check=false)
end
out
end
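# Convert a date to its position in the calendar's business-day arrays; with add_start=true,
# a non-business day maps to the next business day's position.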
function date_pos(hc::MarketCalendar, date::Date, add_start=false; perform_check=true)
perform_check && BusinessDays.checkbounds(hc, date)
v = Int(hc.bdayscounter_array[BusinessDays._linenumber(hc, date)]) + 1
if add_start && !isbday(hc, date)
v + 1
else
v
end
end
function date_range(hc::MarketCalendar, dt_min::Date, dt_max::Date)
date_range(hc, dt_min .. dt_max)
end
function Base.show(io::IO, cal::MarketCalendar)
print(io, "MarketCalendar: $(cal.dtmin) .. $(cal.dtmax) with $(sum(cal.isbday_array)) business days")
end
cal_dt_min(x::MarketCalendar) = x.dtmin
cal_dt_max(x::MarketCalendar) = x.dtmax
"""
```@setup general
data_dir = joinpath("..", "..", "test", "data") # hide
using CSV, DataFramesMeta, Dates, AbnormalReturns
df_firm = CSV.File(joinpath(data_dir, "daily_ret.csv")) |> DataFrame
df_mkt = CSV.File(joinpath(data_dir, "mkt_ret.csv")) |> DataFrame
df_mkt[!, :mkt] = df_mkt.mktrf .+ df_mkt.rf
df_events = CSV.File(joinpath(data_dir, "firm_earnings_announcements.csv")) |> DataFrame
mkt_data = MarketData(
df_mkt,
df_firm
)
```
"""
"""
struct DataVector
data::OffsetVector{Float64, Vector{Float64}}
missing_bdays::Union{Nothing, OffsetVector{Bool, SparseVector{Bool, Int}}}
interval::UnitRange{Int}
function DataVector(data, missing_bdays, interval)
if missing_bdays !== nothing
@assert length(data) == length(missing_bdays) "Data does not match length of dates or missings"
@assert data.offsets == missing_bdays.offsets
end
@assert length(interval) == length(data)
new(data, missing_bdays, interval)
end
end
DataVector(data::AbstractVector{T}, offset::Int) where {T}
DataVector(data::AbstractVector, d::Date, hc::MarketCalendar)
"""
struct DataVector
data::OffsetVector{Float64, Vector{Float64}}
missing_bdays::Union{Nothing, OffsetVector{Bool, SparseVector{Bool, Int}}}
interval::UnitRange{Int}
function DataVector(data, missing_bdays, interval)
if missing_bdays !== nothing
@assert length(data) == length(missing_bdays) "Data does not match length of dates or missings"
@assert data.offsets == missing_bdays.offsets
end
@assert length(interval) == length(data)
new(data, missing_bdays, interval)
end
end
struct MarketData{T}
calendar::MarketCalendar
marketdata::Dict{Symbol, DataVector} # column names as symbols
firmdata::Dict{T, Dict{Symbol, DataVector}} # data stored by firm id and then by column name as symbol
end
struct AllowMissing{mssng} end
"""
struct FixedTable{N, T<:AbstractFloat, AV <: AbstractVector{T}, CL<:Union{Symbol, String}} <: AbstractMatrix{T}
data::SVector{N, AV}
cols::SVector{N, CL}
req_length::Int
function FixedTable(xs::SVector{N, AV}, cols::SVector{N, CL}, req_length=0) where {T, N, AV<:AbstractVector{T}, CL}
new{N, T, AV, CL}(xs, cols, req_length)
end
end
This provides a fixed-width interface that is designed to allow quick access
(either through accessing a column number `data[:, 1]` or accessing a column
name `data[:, :a]`). `req_length` is an optional parameter that specifies the
length that a user originally requested, which is used in later functions
to determine if the FixedTable has too few rows.
"""
struct FixedTable{N, T<:AbstractFloat, AV <: AbstractVector{T}, CL<:Union{Symbol, String}} <: AbstractMatrix{T}
data::SVector{N, AV}
cols::SVector{N, CL}
req_length::Int
function FixedTable(xs::SVector{N, AV}, cols::SVector{N, CL}, req_length=0) where {T, N, AV<:AbstractVector{T}, CL}
new{N, T, AV, CL}(xs, cols, req_length)
end
end
function all_equal_length(xs)
l1 = length(xs[1])
for x in xs[2:end]
if length(x) != l1
return false
end
end
true
end
#Base.length(data::FixedTable{N}) where {N} = N * length(data[1])
Base.size(data::FixedTable{N}) where {N} = (length(data[:, 1]), N)
Base.size(data::Adjoint{X, <:FixedTable{N}}) where {X, N} = (N, size(data.parent, 1))
LinearAlgebra.adjoint(x::FixedTable{N, T}) where {N, T} = Adjoint{T, typeof(x)}(x)
"""
Checks whether each firm_id-date pair is unique, assumes that vectors are sorted by firm_id then date
Returns true if there is at least one firm_id-date pair repeated, false if all are unique
"""
function all_unique_obs(firm_ids::AbstractVector, dates::AbstractVector)
@assert length(firm_ids) == length(dates) "Length of vectors are not the same"
for i in 2:length(firm_ids)
@inbounds if firm_ids[i] == firm_ids[i-1] && dates[i] == dates[i-1]
return true
end
end
return false
end
"""
function MarketData(
df_market,
df_firms;
date_col_market=:date,
date_col_firms=:date,
id_col=:permno,
add_intercept_col=true,
valuecols_market=nothing,
valuecols_firms=nothing
)
## Arguments
- `df_market`: A Tables.jl compatible source that stores market data, indexed by date.
The dates must be a unique set. The column name for the date column is specified
by the keyword argument "date_col_market"
- `df_firms`: A Tables.jl compatible source that stores firm data. Each firm must have a
unique set of dates. The column name for the date column is specified
by the keyword argument "date_col_firms" and the firm ID column is specified
by the keyword argument "id_col"
- `valuecols_market=nothing`: If left as nothing, all other columns in `df_market` are
used as the value columns. These are the columns that are stored in the resulting
dataset. Otherwise a vector of Symbol or String specifying column names.
- `valuecols_firms=nothing`: Same as above
- `id_col=:permno`: The column corresponding to the set of firm IDs in `df_firms`
- `add_intercept_col=true`: Whether to add a column to the data for an intercept (which is
always equal to 1)
MarketData is the main data storage structure. Data is stored for each firm in
a Dict, where the data itself is another Dict mapping column names (as Symbols,
such as :ret) to DataVectors, and the keys of the outer Dict are firm IDs. The
MarketData struct also stores overall market data and a calendar of dates.
Any firm data must have a corresponding market data date, so there cannot be a
firm return if there is not a market return on that date.
## Example
```@example general
df_firm = CSV.File(joinpath(data_dir, "daily_ret.csv")) |> DataFrame
df_mkt = CSV.File(joinpath(data_dir, "mkt_ret.csv")) |> DataFrame
df_mkt[!, :mkt] = df_mkt.mktrf .+ df_mkt.rf
mkt_data = MarketData(
df_mkt,
df_firm
)
```
"""
function MarketData(
df_market,
df_firms;
date_col_market=:date,
date_col_firms=:date,
id_col=:permno,
add_intercept_col=true,
valuecols_market=nothing,
valuecols_firms=nothing
)
df_market = DataFrame(df_market)
df_firms = DataFrame(df_firms)
if add_intercept_col
df_market[!, :intercept] .= 1.0
if valuecols_market !== nothing && :intercept ∉ valuecols_market
push!(valuecols_market, :intercept)
end
end
if valuecols_market === nothing
valuecols_market = Symbol.([n for n in Symbol.(names(df_market)) if n ∉ [date_col_market]])
end
if valuecols_firms === nothing
valuecols_firms = Symbol.([n for n in Symbol.(names(df_firms)) if n ∉ [date_col_firms, id_col]])
end
select!(df_market, vcat([date_col_market], valuecols_market))
dropmissing!(df_market, date_col_market)
#dropmissing!(df_market)
sort!(df_market)
if any(nonunique(df_market, [date_col_market]))
@error("There are duplicate date rows in the market data")
end
select!(df_firms, vcat([id_col, date_col_firms], valuecols_firms))
dropmissing!(df_firms, [id_col, date_col_firms])
sort!(df_firms, [id_col, date_col_firms])
# since this is sorted, just a single iteration is enough to check
if all_unique_obs(df_firms[:, id_col], df_firms[:, date_col_firms])
@error("There are duplicate id-date rows in the firm data")
end
cal = MarketCalendar(df_market[:, date_col_market])
market_data = Dict(
valuecols_market .=>
DataVector.(
Tables.columns(df_market[:, valuecols_market]),
0
# Ref(df_market[:, date_col_market]),
# Ref(cal)
)
)
check_all_businessdays(unique(df_firms[:, date_col_firms]), cal)
gdf = groupby(df_firms, id_col)
df_temp = combine(
gdf,
date_col_firms => (x -> add_missing_bdays(x, cal)) => date_col_firms
)
if nrow(df_temp) > 0
insertcols!(df_temp, [col => missing for col in valuecols_firms]...)
df_firms = vcat(
df_firms,
df_temp
)
sort!(df_firms, [id_col, date_col_firms])
gdf = groupby(df_firms, id_col)
end
col_tab = columntable(gdf)
firm_data = Dict{typeof(col_tab[id_col][1][1]), Dict{Symbol, DataVector}}()
sizehint!(firm_data, length(col_tab[id_col]))
for i in 1:length(col_tab[id_col])
firm_data[col_tab[id_col][i][1]] = Dict(
valuecols_firms .=> (
DataVector(
col_tab[col][i],
col_tab[date_col_firms][i][1],
cal
) for col in valuecols_firms
)
)
end
MarketData(
cal,
market_data,
firm_data
)
end
function check_all_businessdays(dates, cal)
bday_list = isbday(cal, dates)
if !all(bday_list)
bday_list_inv = (!).(bday_list)
if sum(bday_list_inv) <= 3
@error("Dates $(dates[bday_list_inv]) are not in the MARKET_DATA_CACHE")
else
d1_test = findfirst(bday_list_inv)
d2_test = findlast(bday_list_inv)
@error("Dates $(dates[d1_test]) ... $(dates[d2_test]) are not in the MARKET_DATA_CACHE")
end
end
end
function setup_calendar_data(f, data, offset)
if any(ismissing.(data))
missing_days = OffsetVector(sparsevec(any(ismissing, data, dims=2)), offset)
else
missing_days = nothing#OffsetVector(spzeros(Bool, size(data, 1)), offsets)
end
data = OffsetVector(coalesce.(data, zero(nonmissingtype(eltype(data)))), offset)
f(
data,
missing_days,
offset+1:offset+length(data)
)
end
function DataVector(data::AbstractVector{T}, offset::Int) where {T}
if any(ismissing.(data))
if all(ismissing.(data))
return DataVector(
OffsetVector(zeros(nonmissingtype(T), 1), offset),
OffsetVector(sparsevec([1], [true]), offset),
offset+1:offset+1
)
end
i = findfirst(!ismissing, data)
j = findlast(!ismissing, data)
setup_calendar_data(
DataVector,
data[i:j],
offset+i-1
)
else
setup_calendar_data(
DataVector,
data,
offset
)
end
end
function DataVector(data::AbstractVector, d::Date, hc::MarketCalendar)
DataVector(data, date_pos(hc, d)-1)
end
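# Return the business days between the first and last date that are absent from `dates`
# (assumes `dates` is sorted).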
function add_missing_bdays(dates, cal)
out = Date[]
if length(dates) == bdayscount(cal, dates[1], dates[end]) + 1
return out
end
dates_counter = 1
for d in listbdays(cal, dates[1], dates[end])
if dates[dates_counter] > d
push!(out, d)
else
dates_counter += 1
end
end
out
end
function get_missing_bdays(data::MarketData{T}, id::T, r::UnitRange{Int}, col::Union{Symbol, String, ContinuousTerm, Term}) where {T}
mssngs = data_missing_bdays(data[id, col])
if mssngs === nothing
nothing
else
temp = mssngs[r]
if nnz(temp) == 0
nothing
else
temp
end
end
end
get_missing_bdays(data::MarketData{T}, id::T, r::UnitRange{Int}, col::InterceptTerm{true}) where {T} = get_missing_bdays(data, id, r, :intercept)
function get_missing_bdays(data::MarketData{T}, id::T, r::UnitRange{Int}, col::FunctionTerm) where {T}
combine_missing_bdays(get_missing_bdays.(Ref(data), id, Ref(r), col.args)...)
end
function get_missing_bdays(data::MarketData{T}, id::T, r::UnitRange{Int}, col::InteractionTerm) where {T}
combine_missing_bdays(get_missing_bdays.(Ref(data), id, Ref(r), col.terms)...)
end
function get_missing_bdays(data::MarketData{T}, id::T, r::UnitRange{Int}, col::StatsModels.LeadLagTerm{<:Any, typeof(lead)}) where {T}
mssngs = data_missing_bdays(data[id, col.term])
if mssngs === nothing
nothing
else
temp = mssngs[r .+ col.nsteps]
if nnz(temp) == 0
nothing
else
temp
end
end
end
function get_missing_bdays(data::MarketData{T}, id::T, r::UnitRange{Int}, col::StatsModels.LeadLagTerm{<:Any, typeof(lag)}) where {T}
mssngs = data_missing_bdays(data[id, col.term])
if mssngs === nothing
nothing
else
temp = mssngs[r .- col.nsteps]
if nnz(temp) == 0
nothing
else
temp
end
end
end
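# Combine several missing-day masks with an elementwise OR: a day is missing if it is
# missing in any input; `nothing` indicates no missing days.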
combine_missing_bdays(vals::Nothing...) = nothing
function combine_missing_bdays(vals::Union{Nothing, SparseVector{Bool, Int}}...)::SparseVector{Bool, Int}
i = findfirst(!isnothing, vals)
out = vals[i]
for j in i+1:length(vals)
if vals[j] !== nothing
out = out .| vals[j]
end
end
out
end
function Base.view(data::DataVector, r::UnitRange)
view(data.data, r)
end
function Base.view(data::DataVector, r::AbstractVector)
view(data.data, r)
end
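# Intersect UnitRanges: the largest range contained in every input range.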
function maximin(rs::UnitRange{Int}...)
max(first.(rs)...):min(last.(rs)...)
end
##################################################
# Basic get functions that return a DataVector
##################################################
@inline function Base.getindex(
data::MarketData{T},
id::T,
col::Symbol
) where {T}
if col ∈ keys(data.marketdata)
data.marketdata[col]
else
data.firmdata[id][col]
end
end
Base.getindex(data::MarketData{T}, id::T, col::String) where {T} = data[id, Symbol(col)]
Base.getindex(data::MarketData{T}, id::T, col::Union{Term, ContinuousTerm}) where {T} = data[id, col.sym]
Base.getindex(data::MarketData{T}, id::T, col::StatsModels.LeadLagTerm) where {T} = data[id, col.term]
##################################################
# Simple function to return single value
##################################################
function Base.getindex(
data::DataVector,
loc::Int
)
if loc ∉ interval(data)
missing
elseif data.missing_bdays !== nothing && data.missing_bdays[loc]
missing
else
data.data[loc]
end
end
function Base.getindex(
data::MarketData{T},
id::T,
dt::Date,
col::Union{AbstractString, Symbol}
) where {T}
col = data[id, col]
l = date_pos(calendar(data), dt)
col[l]
end
##################################################
# Basic get functions that return a view of the
# underlying data
##################################################
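# Positions in `r` whose corresponding flag in `x` is false (i.e., the non-missing entries).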
function index_values(r, x)
out = zeros(Int, length(x) - sum(x))
j = 0
for i in eachindex(r, x)
if !x[i]
j += 1
@inbounds out[j] = r[i]
end
end
out
end
Base.getindex(data::MarketData{T}, id::T, r::UnitRange, col::Symbol, missing_bdays::AbstractVector{Bool}) where {T} =
view(data[id, col], index_values(r, missing_bdays))
Base.getindex(data::MarketData{T}, id::T, r::UnitRange, col::Symbol, ::Nothing=nothing) where {T} =
view(data[id, col], r)
Base.getindex(data::MarketData{T}, id::T, r::UnitRange, col::String, missing_bdays=nothing) where {T} =
data[id, r, Symbol(col), missing_bdays]
Base.getindex(data::MarketData{T}, id::T, r::UnitRange, col::Union{Term, ContinuousTerm}, missing_bdays=nothing) where {T} =
data[id, r, col.sym, missing_bdays]
Base.getindex(data::MarketData{T}, id::T, r::UnitRange, col::StatsModels.LeadLagTerm{<:Any, typeof(lead)}, missing_bdays=nothing) where {T} =
data[id, r .+ col.nsteps, col.term, missing_bdays]
Base.getindex(data::MarketData{T}, id::T, r::UnitRange, col::StatsModels.LeadLagTerm{<:Any, typeof(lag)}, missing_bdays=nothing) where {T} =
data[id, r .- col.nsteps, col.term, missing_bdays]
Base.getindex(data::MarketData{T}, id::T, r::UnitRange, col::InterceptTerm{true}, missing_bdays=nothing) where {T} =
data[id, r, :intercept, missing_bdays]
##################################################
# Slightly more complex get functions to deal
# with interaction and function terms
# (these greatly slow down the process and are
# not recommended)
##################################################
@inline function Base.getindex(
data::MarketData{T},
id::T,
r::UnitRange,
col::InteractionTerm,
missing_bdays=nothing
) where {T}
l = length(r)
if missing_bdays !== nothing
l -= count(missing_bdays)
end
out = fill(1.0, l)
for t in col.terms
out .*= data[id, r, t, missing_bdays]
end
out = OffsetVector(out, r[1]-1)
if missing_bdays === nothing
view(out, r)
else
view(out, r[1]:r[end]-count(missing_bdays))
end
end
@inline function Base.getindex(
data::MarketData{T},
id::T,
r::UnitRange,
col::FunctionTerm,
missing_bdays=nothing
) where {T}
out = OffsetVector(col.f.((data[id, r, t, missing_bdays] for t in col.args)...), r[1]-1)
if missing_bdays === nothing
view(out, r)
else
view(out, r[1]:r[end]-count(missing_bdays))
end
end
##################################################
# Get functions that return a Fixed Table
# Starting with ones where the range is already
# defined (r), the assumption is this r
# is already checked and an error is returned
# if it is out of bounds anywhere
##################################################
@inline function Base.getindex(
data::MarketData{T},
id::T,
r::UnitRange{Int},
cols::SVector{N, CL},
missing_bdays::Union{Nothing, AbstractVector{Bool}};
req_length::Int=0,
col_names::SVector{N}=cols
) where {T, N, CL <: Union{String, Symbol}}
if missing_bdays !== nothing
@assert length(missing_bdays) == length(r) "Missing days is wrong length"
end
FixedTable(
getindex.(Ref(data), id, Ref(r), cols, Ref(missing_bdays)),
col_names,
req_length
)
end
function Base.getindex(
data::MarketData{T},
id::T,
r::UnitRange{Int},
cols::CL,
missing_bdays::Union{Nothing, AbstractVector{Bool}};
req_length::Int=0,
col_names=nothing
) where {T, CL <: MatrixTerm}
if missing_bdays !== nothing
@assert length(missing_bdays) == length(r) "Missing days is wrong length"
end
N = length(cols.terms)
FixedTable(
SVector{N}(data[id, r, col, missing_bdays] for col in cols.terms),
col_names === nothing ? SVector{N}(coefnames(cols.terms)) : col_names,
req_length
)
end
function Base.getindex(
data::MarketData{T},
id::T,
r::UnitRange{Int},
f::FormulaTerm,
missing_bdays=nothing;
req_length::Int=0,
check_intercept=true
) where {T}
f = adjust_formula(f; check_intercept)
sch = apply_schema(f, schema(f, data))
cols = MatrixTerm((sch.lhs, sch.rhs.terms...))
data[
id,
r,
cols,
missing_bdays,
req_length=req_length
]
end
##################################################
# Get functions that return a Fixed Table
# these allow input for a date range which is
# converted to unitranges
##################################################
function Base.getindex(
data::MarketData{T},
id::T,
dates::ClosedInterval{Date},
cols::SVector{N};
) where {T, N}
r1 = date_range(calendar(data), dates)
r = maximin(r1, interval.(Ref(data), id, cols)...)
mssngs = combine_missing_bdays(get_missing_bdays.(Ref(data), id, Ref(r), cols)...)
data[id, r, cols, mssngs, req_length=length(r1)]
end
function Base.getindex(
data::MarketData{T},
id::T,
dates::ClosedInterval{Date},
cols::AbstractVector
) where {T}
cols = SVector{length(cols)}(cols)
data[id, dates, cols]
end
function Base.getindex(
data::MarketData{T},
id::T,
dates::ClosedInterval{Date},
f::FormulaTerm;
check_intercept=true
) where {T}
# this has to deal with schema here since in order to get missing bdays
# and other items schema is necessary
f = adjust_formula(f; check_intercept)
sch = apply_schema(f, schema(f, data))
out = (sch.lhs, sch.rhs.terms...)
r1 = date_range(calendar(data), dates)
r = maximin(r1, interval.(Ref(data), id, out)...)
mssngs = combine_missing_bdays(get_missing_bdays.(Ref(data), id, Ref(r), out)...)
data[id, r, MatrixTerm(out), mssngs, req_length=length(r1)]
end
##################################################
# getindex that simply returns a tuple if columns
# are not provided
##################################################
Base.getindex(data::MarketData{T}, id::T, r::Union{UnitRange, ClosedInterval{Date}}) where {T} = (data, id, r)
##################################################
# functions related to FixedTable
##################################################
function Base.getindex(data::FixedTable{N}, ::Colon, i::Int) where {N}
@assert 1 <= i <= N "Column not in data"
@inbounds data.data[i]
end
function Base.getindex(data::FixedTable{N}, i::Int, j::Int) where {N}
@assert 1 <= j <= N "Column not in data"
data.data[j][i]
end
function Base.getindex(
data::FixedTable{N, T, AV, CL},
::Colon,
col::CL
) where {N, T, AV, CL}
@assert col ∈ names(data) "Column is not in the data"
i = findfirst(==(col), names(data))
data[:, i]
end
function Base.getindex(data::FixedTable{N, T, AV, CL}, ::Colon, col) where {N, T, AV, CL}
data[:, CL(col)]
end
Base.names(x::FixedTable) = x.cols
# function Base.length(data::TimelineTable{false})
# real_dates = dates_min_max(data_dates(data), norm_dates(data))
# c = bdayscount(calendar(data), dt_min(real_dates), dt_max(real_dates)) + isbday(calendar(data), dt_max(real_dates))
# new_mssngs = get_missing_bdays(calendar(data), data_missing_bdays(data), data_dates(data), real_dates)
# return c - nnz(new_mssngs)
# end
function Base.length(x::DataVector)
if data_missing_bdays(x) === nothing
length(raw_values(x))
else
length(data_missing_bdays(x)) - nnz(data_missing_bdays(x))
end
end
# DataFrames.dropmissing(data::TimelineTable{false}) = data
# DataFrames.allowmissing(data::TimelineTable{true}) = data
# function DataFrames.dropmissing(data::TimelineTable{true})
# TimelineTable(
# parent(data),
# data_id(data),
# AllowMissing{false},
# data_dates(data),
# DictIndex(names(data)),
# data_missing_bdays(data),
# norm_dates(data)
# )
# end
# function DataFrames.allowmissing(data::TimelineTable{false})
# TimelineTable(
# parent(data),
# data_id(data),
# AllowMissing{true},
# data_dates(data),
# DictIndex(names(data)),
# data_missing_bdays(data),
# norm_dates(data)
# )
# end
raw_values(x::DataVector) = x.data
data_missing_bdays(x::DataVector) = x.missing_bdays
interval(x::DataVector) = x.interval
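# The interval methods below return the range of calendar positions over which a term
# can be evaluated; lead/lag terms shrink the usable range at one end.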
interval(data::MarketData{T}, id::T, col::Union{Symbol, String, ContinuousTerm, Term}) where {T} = interval(data[id, col])
function interval(data::MarketData{T}, id::T, col::FunctionTerm) where {T}
maximin(interval.(Ref(data), id, col.args)...)
end
function interval(data::MarketData{T}, id::T, col::InteractionTerm) where {T}
maximin(interval.(Ref(data), id, col.terms)...)
end
function interval(data::MarketData{T}, id::T, col::StatsModels.LeadLagTerm{<:Any, typeof(lead)}) where {T}
r = interval(data[id, col.term])
maximin(r, r .- col.nsteps)
end
function interval(data::MarketData{T}, id::T, col::StatsModels.LeadLagTerm{<:Any, typeof(lag)}) where {T}
r = interval(data[id, col.term])
maximin(r, r .+ col.nsteps)
end
interval(data::MarketData{T}, id::T, col::InterceptTerm{true}) where {T} = interval(data[id, :intercept])
calendar(x::MarketData) = x.calendar
data_dates(x::MarketData) = cal_dt_min(x) .. cal_dt_max(x) # date range covered by the calendar
function Base.show(io::IO, data::MarketData{T}) where {T}
println(io, "MarketData with ID type $T with $(length(getfield(data, :firmdata))) unique firms")
println(io, data.calendar)
println(io, "Market Columns: $(join(keys(data.marketdata), ", "))")
println(io, "Firm Columns: $(join(keys(data.firmdata[first(keys(data.firmdata))]), ", "))")
end
dt_min(x::ClosedInterval{Date}) = x.left
dt_max(x::ClosedInterval{Date}) = x.right
cal_dt_min(x::MarketData) = cal_dt_min(calendar(x))
cal_dt_max(x::MarketData) = cal_dt_max(calendar(x))
##################################
# transformation function to add new column to data
# this is not necessarily optimized, but is
# faster for columns that are in marketdata
##################################
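# Example (mirroring the test suite): transform!(data, @formula(mkt ~ mktrf + rf))
# adds a market return column computed from existing market columns.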
in_market_data(data::MarketData, t::Symbol) = t ∈ keys(data.marketdata)
in_market_data(data::MarketData, t::ContinuousTerm) = in_market_data(data, t.sym)
in_market_data(data::MarketData, t::InteractionTerm) = all(in_market_data.(Ref(data), t.terms))
in_market_data(data::MarketData, t::StatsModels.LeadLagTerm) = in_market_data(data, t.term)
in_market_data(data::MarketData, t::FunctionTerm) = all(in_market_data.(Ref(data), t.args))
function DataFrames.transform!(
data::MarketData,
f::FormulaTerm
)
sch = apply_schema(f, schema(f, data))
coef_new = coefnames(sch.lhs) |> Symbol
if all(in_market_data.(Ref(data), sch.rhs.terms))
id = first(keys(data.firmdata))
rs = maximin((interval(data, id, t) for t in sch.rhs.terms)...)
mssngs = combine_missing_bdays(get_missing_bdays.(Ref(data), id, Ref(rs), sch.rhs.terms)...)
vals = fill(0.0, length(rs))
for t in sch.rhs.terms
vals = vals .+ data[id, rs, t, nothing] # pass nothing for missings: missing entries are stored as 0.0 and are tracked separately via mssngs
end
data.marketdata[coef_new] = DataVector(
OffsetVector(vals, first(rs)-1),
mssngs,
rs
)
else
for id in keys(data.firmdata)
rs = maximin((interval(data, id, t) for t in sch.rhs.terms)...)
mssngs = combine_missing_bdays(get_missing_bdays.(Ref(data), id, Ref(rs), sch.rhs.terms)...)
vals = fill(0.0, length(rs))
for t in sch.rhs.terms
vals = vals .+ data[id, rs, t, nothing] # pass nothing for missings: missing entries are stored as 0.0 and are tracked separately via mssngs
end
data.firmdata[id][coef_new] = DataVector(
OffsetVector(vals, first(rs)-1),
mssngs,
rs
)
end
end
end
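# Add an explicit intercept to the formula unless one is deliberately omitted
# (e.g., @formula(y ~ 0 + x)).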
function adjust_formula(f; check_intercept=true)
if !check_intercept
f
elseif !StatsModels.omitsintercept(f) && !StatsModels.hasintercept(f)
FormulaTerm(f.lhs, InterceptTerm{true}() + f.rhs)
elseif StatsModels.omitsintercept(f)
FormulaTerm(f.lhs, Tuple(r for r in f.rhs if !isa(r, ConstantTerm)))
else
f
end
end
using CSV, DataFrames, Dates, Test, BusinessDays, StaticArrays
using AbnormalReturns
##
df_firm = CSV.File(joinpath("data", "daily_ret.csv")) |> DataFrame
# one day (Permno 10104, 2019-01-17) is deleted to provide a test for missing data
df_mkt = CSV.File(joinpath("data", "mkt_ret.csv")) |> DataFrame
df_res = CSV.File(joinpath("data", "car_results.csv")) |> DataFrame
##
data = MarketData(
df_mkt,
df_firm
)
DataFrames.transform!(data, @formula(mkt ~ mktrf + rf))
##
@test AbnormalReturns.date_range(data.calendar, Date(2019, 3, 31) .. Date(2019, 4, 5)) == 62:66
@test AbnormalReturns.date_range(data.calendar, Date(2019, 3, 31) .. Date(2019, 4, 6)) == 62:66
@test AbnormalReturns.date_range(data.calendar, Date(2019, 4) .. Date(2019, 4, 5)) == 62:66
@test AbnormalReturns.date_range(data.calendar, Date(2019, 4) .. Date(2019, 4, 6)) == 62:66
##
@test isapprox(std(data[18428, Date(2019, 4) .. Date(2019, 10), ["ret"]]), 0.0224085; atol=.00001)
@test isapprox(var(data[18428, Date(2019, 4) .. Date(2019, 10), ["ret"]]), 0.0005021; atol=.00001)
@test isapprox.(std(data[[18428, 18428], [Date(2019, 4), Date(2019, 4)] .. [Date(2019, 10), Date(2019, 10)], ["ret"]]), 0.0224085; atol=.00001) |> all
@test isapprox.(var(data[[18428, 18428], [Date(2019, 4), Date(2019, 4)] .. [Date(2019, 10), Date(2019, 10)], ["ret"]]), 0.0005021; atol=.00001) |> all
@test isapprox(std(data[18428, Date(2019, 4) .. Date(2019, 10), ["ret", "mktrf"]]), 0.0189731; atol=.00001)
@test isapprox(var(data[18428, Date(2019, 4) .. Date(2019, 10), ["ret", "mktrf"]]), 0.00036; atol=.00001)
@test isapprox.(std(data[[18428, 18428], [Date(2019, 4), Date(2019, 4)] .. [Date(2019, 10), Date(2019, 10)], ["ret", "mktrf"]]), 0.0189731; atol=.00001) |> all
@test isapprox.(var(data[[18428, 18428], [Date(2019, 4), Date(2019, 4)] .. [Date(2019, 10), Date(2019, 10)], ["ret", "mktrf"]]), 0.00036; atol=.00001) |> all
##
rr = quick_reg(data[18428, Date(2019, 4) .. Date(2019, 10)], @formula(ret ~ mktrf + hml))
@test coefnames(rr) == SVector{3}("(Intercept)", "mktrf", "hml")
@test responsename(rr) == "ret"
@test nobs(rr) == 126
@test all(isapprox.(coef(rr), [-.00125105, 1.40602071, 1.19924984]; atol=.00001))
@test isapprox(r2(rr), .42340085667)
@test isapprox(adjr2(rr), .41402526)
@test dof_residual(rr) == 123
@test islinear(rr)
@test alpha(rr) == rr.coef[1]
@test beta(rr) == rr.coef[2]
##
rr = quick_reg(data[10104, Date(2019, 1, 2) .. Date(2019, 6)], @formula(ret ~ mktrf + hml))
@test nobs(rr) == 103 # one less due to missing date
@test all(isapprox.(coef(rr), [0.00036009, 0.886629, -0.0247203]; atol=.0001))
@test isapprox(r2(rr), 0.568637; atol=.0001)
##
# test that function term works and interactions work
# lag and lead terms with boundary
rr = quick_reg(data[11762, Date(2019, 1, 2) .. Date(2019, 6)], @formula(ret ~ log1p(mktrf) * lead(mktrf) + hml))
@test nobs(rr) == 104
@test isapprox(adjr2(rr), 0.5872816; atol=.000001)
@test all(isapprox.(coef(rr), [0.00010658, 1.233606, 0.0290669, 0.2685282, -1.80086095]; atol=.000001))
##
rr = quick_reg(data[11762, Date(2019, 1, 2) .. Date(2019, 6)], @formula(ret ~ log1p(mktrf) * lag(mktrf) + hml))
@test nobs(rr) == 103
@test isapprox(adjr2(rr), 0.61329402; atol=.000001)
@test all(isapprox.(coef(rr), [0.000478025, 1.262271804, -0.0727015, 0.3222624, 20.7364917]; atol=.000001))
##
# and again with a missing data point
rr = quick_reg(data[10104, Date(2019, 1, 10) .. Date(2019, 6)], @formula(ret ~ log1p(mktrf) * lead(mktrf) + hml))
@test nobs(rr) == 97
@test all(isapprox.(coef(rr), [0.000156196, 0.85712201, -0.13628144, -0.03920588, -12.1167821]; atol=.000001))
@test isapprox(adjr2(rr), 0.4994264; atol=.000001)
##
# lag and lead terms with boundary
rr = quick_reg(data[10104, Date(2019, 1, 10) .. Date(2019, 6)], @formula(ret ~ log1p(mktrf) * lag(mktrf) + hml))
@test nobs(rr) == 97
@test isapprox(adjr2(rr), 0.48525097; atol=.000001)
##
@test_throws KeyError quick_reg(data[1, Date(2020) .. Date(2021)], @formula(ret ~ mktrf))
rr = quick_reg(data[18428, Date(2019, 4) .. Date(2019, 10)], @formula(ret ~ mktrf + hml); minobs=1000) # not enough data case
@test ismissing(coefnames(rr))
@test ismissing(coef(rr))
@test nobs(rr) == 126
@test ismissing(r2(rr))
@test ismissing(adjr2(rr))
@test alpha(rr) |> ismissing
@test beta(rr) |> ismissing
##
# the SAS code that I tested this against appears to round results to 3 significant digits
# Since these are over specific periods, I specify those as functions to make this easier
event_start(x; data=data) = advancebdays(data.calendar, tobday(data.calendar, x), -10)
event_end(x; data=data) = advancebdays(data.calendar, tobday(data.calendar, x), 10)
est_end(x; data=data) = advancebdays(data.calendar, event_start(x), -16)
est_start(x; data=data) = advancebdays(data.calendar, est_end(x), -149)
rr_market = quick_reg(
data[df_res.permno, est_start.(df_res.event_date) .. est_end.(df_res.event_date)],
@formula(ret ~ mktrf)
)
@test isapprox(round.(alpha.(rr_market), digits=5), df_res.alpha_market_model_)
@test isapprox(round.(beta.(rr_market), digits=3), df_res.beta_market_model)
cars = car(data[df_res.permno, event_start.(df_res.event_date) .. event_end.(df_res.event_date)], rr_market)
@test isapprox(round.(cars, sigdigits=3), df_res.car_mm)
bhars = bhar(data[df_res.permno, event_start.(df_res.event_date) .. event_end.(df_res.event_date)], rr_market)
@test isapprox(round.(bhars, sigdigits=3), df_res.bhar_mm)
stds = std.(rr_market)
vars = var.(rr_market)
@test isapprox(round.(vars, digits=10), df_res.estimation_period_variance_market_model_)
##
rr_ff = quick_reg(
data[df_res.permno, est_start.(df_res.event_date) .. est_end.(df_res.event_date)],
@formula(ret ~ mktrf + smb + hml)
)
cars = car(data[df_res.permno, event_start.(df_res.event_date) .. event_end.(df_res.event_date)], rr_ff)
@test isapprox(round.(cars, sigdigits=3), df_res.car_ff)
bhars = bhar(data[df_res.permno, event_start.(df_res.event_date) .. event_end.(df_res.event_date)], rr_ff)
@test isapprox(round.(bhars, sigdigits=3), df_res.bhar_ff)
stds = std.(rr_ff)
vars = var.(rr_ff)
@test isapprox(round.(vars, digits=10), df_res.estimation_period_variance_ff_model_)
##
rr_ffm = quick_reg(
data[df_res.permno, est_start.(df_res.event_date) .. est_end.(df_res.event_date)],
@formula(ret ~ mktrf + smb + hml + umd)
)
cars = car(data[df_res.permno, event_start.(df_res.event_date) .. event_end.(df_res.event_date)], rr_ffm)
@test isapprox(round.(cars, sigdigits=3), df_res.car_ffm)
bhars = bhar(data[df_res.permno, event_start.(df_res.event_date) .. event_end.(df_res.event_date)], rr_ffm)
@test isapprox(round.(bhars, sigdigits=3), df_res.bhar_ffm)
stds = std.(rr_ffm)
vars = var.(rr_ffm)
@test isapprox(round.(vars, digits=10), df_res.estimation_period_variance_carhart_model_)
##
[Documentation](https://junder873.github.io/AbnormalReturns.jl/dev/)
[Build Status](https://github.com/junder873/AbnormalReturns.jl/actions)
# AbnormalReturns.jl
This package is designed to quickly calculate abnormal returns on large datasets by running regressions on slices of dates. In finance and economics research, abnormal returns are commonly used in event studies on firms to interpret how the stock market perceives an event. For example, if a firm makes an announcement, did the market see that as good news? To what degree (i.e., how big are the returns)?
Calculating abnormal returns typically requires running regressions on a slice of the data (during an estimation window) and using those coefficients to predict what a firm's returns would be during an event window. The difference between the actual returns and the expected returns is used as a measure of abnormal returns.
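As a minimal sketch of that workflow (assuming `mkt_data`, a firm identifier `permno`, and the window dates are already set up as shown in the documentation):

```julia
using AbnormalReturns
# estimate how the firm co-moves with market factors during the estimation window
rr = quick_reg(mkt_data[permno, est_start .. est_end], @formula(ret ~ mktrf + smb + hml))
# buy-and-hold abnormal return over the event window
bhar(mkt_data[permno, event_start .. event_end], rr)
```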
## Performance
This package is capable of calculating abnormal returns very quickly. On a Ryzen 7 5700X, it can calculate 1 million different regressions on different slices of data in under 0.5 seconds. In a larger benchmark using simulated data for 1 million firm-events, this package can calculate abnormal returns for all events using two methods (so 2 million total regressions, 2 million estimations and some other statistics) in 1.5 seconds. See the benchmark folder for more details.
## Acknowledgements
This package would not be possible without the extensive work done in [BusinessDays.jl](https://github.com/JuliaFinance/BusinessDays.jl) and [StatsModels.jl](https://github.com/JuliaStats/StatsModels.jl).
## Disclaimer
While this package does have testing, it is in beta. Methods might change and there could be errors.
# Benchmarking AbnormalReturns.jl
Since the goal of this package is to calculate abnormal returns on a large number of events, a large dataset is needed. Since I cannot publish actual market data due to copyright, "create_approximate_real.jl" downloads real market data and runs estimations so that realistic data can be simulated; the benchmark itself does not require the actual data. The actual data is downloaded from Wharton Research Data Services (WRDS) and uses CRSP and data from [Fama-French](https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html).
The file "create_data.jl" uses those real correlations and mean coefficients to build a sample of market-level returns, firm-specific returns for 10,000 firms, and 1 million firm events.
Finally, "run_benchmark.jl" loads the generated data and runs timings. I included the timings on two computers I have access to in that file.
# AbnormalReturns API
This package reexports [StatsModels.jl](https://github.com/JuliaStats/StatsModels.jl) and [BusinessDays.jl](https://github.com/JuliaFinance/BusinessDays.jl). See those packages and their respective documentation for methods that they export.
Notable methods from [StatsModels.jl](https://github.com/JuliaStats/StatsModels.jl):
- `@formula`
Notable methods from [BusinessDays.jl](https://github.com/JuliaFinance/BusinessDays.jl):
- `advancebdays`
- `bdayscount`
- `tobday`
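For instance, with the business-day calendar stored in a `MarketData` object (here `cal` is assumed to be `mkt_data.calendar`; a minimal sketch):

```julia
advancebdays(cal, Date(2020, 9, 10), -2) # two business days before 2020-09-10
bdayscount(cal, Date(2020, 1, 2), Date(2020, 6, 30)) # count business days between two dates
tobday(cal, Date(2020, 9, 12)) # roll a non-business day forward to a business day
```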
## Setting Up Data
```@docs
MarketData
```
## Regression Related Methods
```@docs
quick_reg
BasicReg
alpha
beta
var
std
```
## Calculation Functions
```@docs
bhar
car
bh_return
```
# AbnormalReturns Data Structure
The key to the performance in this package is the underlying data structure. These rely on a combination of [BusinessDays.jl](https://github.com/JuliaFinance/BusinessDays.jl) and [Tables.jl](https://github.com/JuliaData/Tables.jl) to provide fast access to slices of data based on dates.
## DataVector
```@docs
DataVector
```
These structures provide strongly typed data that is easy to slice based on a range of dates. The data is always stored as `Float64`, even though it accepts elements of type `Missing`. In storing the data, `Missing` values are converted to `0.0`, and `missing_bdays` is a `SparseVector` that is `true` when that value is missing. `interval` stores the range of calendar positions that the data covers.
Data in a `DataVector` is stored in an `OffsetVector` from [OffsetArrays.jl](https://github.com/JuliaArrays/OffsetArrays.jl), but this data is rarely accessed directly.
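As a minimal sketch of how a vector containing `missing` is stored (using the `DataVector(data, offset)` constructor shown above; exact construction details are internal and may differ):

```julia
using AbnormalReturns
v = [0.01, missing, 0.02]
dv = AbnormalReturns.DataVector(v, 0) # offset 0: the data covers calendar positions 1:3
# the missing entry is stored as 0.0 and flagged in a sparse missing_bdays mask
```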
## MarketData
```julia
struct MarketData{T}
calendar::MarketCalendar
marketdata::Dict{Symbol, DataVector} # column names as symbols
firmdata::Dict{T, Dict{Symbol, DataVector}} # data stored by firm id and then by column name as symbol
end
```
This struct is made up of a set of `DataVector`. The main purpose of this is to provide efficient storage of the underlying data along with the calendar so that dates are easily translated to access the underlying data.
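For example, a slice of one firm's data is retrieved by translating dates through the calendar (a sketch, assuming `mkt_data` was built as in the example documentation):

```julia
mkt_data[10104, Date(2020) .. Date(2020, 6, 30), [:ret, :mktrf]] # returns a FixedTable
```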
## FixedTable
```@docs
FixedTable
```
# [AbnormalReturns Example](@id Example)
As a quick example:
```@example
data_dir = joinpath("..", "..", "test", "data") # hide
using CSV, DataFramesMeta, Dates, AbnormalReturns
df_firm = CSV.File(joinpath(data_dir, "daily_ret.csv")) |> DataFrame
df_mkt = CSV.File(joinpath(data_dir, "mkt_ret.csv")) |> DataFrame
df_mkt[!, :mkt] = df_mkt.mktrf .+ df_mkt.rf
df_events = CSV.File(joinpath(data_dir, "firm_earnings_announcements.csv")) |> DataFrame
mkt_data = MarketData(
df_mkt,
df_firm
)
df_events = @chain df_events begin
@rtransform(
:est_start = advancebdays(mkt_data.calendar, :ea, -120),
:est_end = advancebdays(mkt_data.calendar, :ea, -2),
:event_start = advancebdays(mkt_data.calendar, :ea, -1),
:event_end = advancebdays(mkt_data.calendar, :ea, 1),
)
@transform(:reg = quick_reg(mkt_data[:permno, :est_start .. :est_end], @formula(ret ~ mkt + smb + hml)))
@transform(
:bhar_reg = bhar(mkt_data[:permno, :event_start .. :event_end], :reg),
:bhar_simple = bhar(mkt_data[:permno, :event_start .. :event_end, ["ret", "mkt"]]),
:car_reg = car(mkt_data[:permno, :event_start .. :event_end], :reg),
:car_simple = car(mkt_data[:permno, :event_start .. :event_end, ["ret", "mkt"]]),
:total_ret = bh_return(mkt_data[:permno, :event_start .. :event_end, ["ret"]]),
:total_mkt_ret = bh_return(mkt_data[:permno, :event_start .. :event_end, ["mkt"]]),
)
@rtransform(
:std = std(:reg),
:var = var(:reg),
)
select(Not([:est_start, :est_end, :event_start, :event_end, :reg]))
# columns eliminated to save space:
select(Not([:car_reg, :car_simple, :var, :total_mkt_ret]))
end
show(df_events) # hide
```
## Data
For the basic data, this uses the files in the test folder of this package ("test\data"). The "daily\_ret.csv" file is a selection of firm returns, while "mkt\_ret.csv" includes the average market return along with some Fama-French factor returns. You can download similar Fama-French data from [here](https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html) or from [FamaFrenchData.jl](https://github.com/tbeason/FamaFrenchData.jl), and stock market data from [AlphaVantage.jl](https://github.com/ellisvalentiner/AlphaVantage.jl) or [WRDSMerger.jl](https://github.com/junder873/WRDSMerger.jl) (requires access to the WRDS database).
The firm data uses "Permno" to identify a stock. This package will work with other identifiers, as long as the identifier-date pair is unique. However, Integers and Symbols will often be fastest (as opposed to String identifiers).
```@setup main_run
data_dir = joinpath("..", "..", "test", "data") # hide
using CSV, DataFramesMeta, Dates, AbnormalReturns
```
Load the firm data:
```@example main_run
df_firm = CSV.File(joinpath(data_dir, "daily_ret.csv")) |> DataFrame
show(df_firm) # hide
```
and the market data:
```@example main_run
df_mkt = CSV.File(joinpath(data_dir, "mkt_ret.csv")) |> DataFrame
show(df_mkt) # hide
```
## Arranging and Accessing the Data
Next, load the data into a `MarketData` object:
```@example main_run
mkt_data = MarketData(
df_mkt,
df_firm;
id_col=:permno,# default
date_col_firms=:date,# default
date_col_market=:date,# default
add_intercept_col=true,# default
valuecols_firms=[:ret],# defaults to nothing, in which case
# all columns other than id and date are used
valuecols_market=[:mktrf, :rf, :smb, :hml, :umd]# defaults to
# nothing, in which case all columns other than date are used
)
show(mkt_data) # hide
```
!!! note
For performance, especially when loading large datasets of firm data, it is best to make sure the firm dataframe is presorted by ID then Date.
This object rearranges the data so it can be quickly accessed later. The `mkt_data` now contains 3 things:
1. A [BusinessDays.jl](https://github.com/JuliaFinance/BusinessDays.jl) calendar that exactly matches the days loaded in the market data.
2. Each column of the `df_mkt`, stored by column name.
3. Each column of the `df_firm` stored in a `Dict` for each firm.
Data is accessed on a by firm basis, for a given date range and specific columns. For example, say you wanted to get the data for Oracle (ORCL) ("Permno" of 10104), for a specific date (using [IntervalSets.jl](https://github.com/JuliaMath/IntervalSets.jl)) and set of columns:
```@repl main_run
orcl_data = mkt_data[10104, Date(2020) .. Date(2020, 6, 30), [:ret, :mktrf, :smb]]
```
Sometimes it is helpful to add a new column (either for convenience or performance reasons, discussed later). To do so, this package borrows the `transform!` function from [DataFrames.jl](https://github.com/JuliaData/DataFrames.jl), using a formula where the left side is the column that is created:
```@repl main_run
transform!(mkt_data, @formula(mkt ~ mktrf + rf));
```
It is also easy to specify the columns as a formula from [StatsModels.jl](https://github.com/JuliaStats/StatsModels.jl). This allows for arbitrary functions, interactions and lags/leads:
```@repl main_run
orcl_data = mkt_data[10104, Date(2020) .. Date(2020, 6, 30), @formula(ret ~ mkt + lag(mkt) + log1p(smb) * hml)]
```
!!! note
While interactions and arbitrary functions are supported, they can significantly slow down performance since a new vector is allocated in each call. Therefore, it is generally recommended to create a new piece of data by calling `transform!` on the dataset to create the new columns. This advice does not apply to lag/lead terms since those do not need to allocate a new column.
The data returned by accessing `mkt_data` is a `FixedTable`, which is essentially a matrix with a fixed width (useful for multiplication and returning a `StaticMatrix` from [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl)). Access into this data is done either by a slice as you would any other matrix:
```@repl main_run
orcl_data[:, 1]
```
Or via the names used to access it in the first place:
```@repl main_run
orcl_data[:, :mkt]
```
## Estimating Regressions
The main goal of this package is quickly running regressions for firm events. The example used here is a firm's earnings announcement. Starting with one example, Oracle announced its Q3 2020 earnings on 2020-9-10. Calculating abnormal returns typically follows three steps:
1. Estimate how the firm typically responds to market factors during a control (or estimation) window
2. Use the coefficients from that regression to estimate how the firm should do during the event window
3. Subtract the estimated return from the actual firm return during the event window. Depending on how this difference is aggregated, these are typically buy and hold abnormal returns (BHAR) or cumulative abnormal returns (CAR)
First, to create the table for the estimation window, define an estimation window and an event window:
```@repl main_run
event_date = Date("2020-09-10")
est_start = advancebdays(mkt_data.calendar, event_date, -120)
est_end = advancebdays(mkt_data.calendar, event_date, -2)
event_start = advancebdays(mkt_data.calendar, event_date, -1)
event_end = advancebdays(mkt_data.calendar, event_date, 1)
```
Next, run the estimation regression (the regression automatically selects the correct columns from the data, so it is not necessary to do that beforehand):
```@example main_run
orcl_data = mkt_data[10104, est_start .. est_end]
rr = quick_reg(orcl_data, @formula(ret ~ mkt + smb + hml))
```
Then get the data for the event window:
```@repl main_run
orcl_data = mkt_data[10104, event_start .. event_end];
```
Now it is easy to run some statistics for the event window:
```@repl main_run
bhar(orcl_data, rr) # BHAR based on regression
car(orcl_data, rr) # CAR based on regression
```
It is also easy to calculate some statistics for the estimation window:
```@repl main_run
var(rr) # Variance of firm returns (similar equation for standard deviation)
beta(rr) # Firm's market beta
alpha(rr) # Firm's market alpha
```
## More Data Using DataFramesMeta
While the above works well, abnormal returns are often calculated on thousands or more firm-events. Here, I use earnings announcements for about 100 firms from March to November 2020:
```@repl main_run
df_events = CSV.File(joinpath(data_dir, "firm_earnings_announcements.csv")) |> DataFrame
```
Using [DataFramesMeta.jl](https://github.com/JuliaData/DataFramesMeta.jl) and the `@chain` macro from [Chain.jl](https://github.com/jkrumbiegel/Chain.jl), the above steps become:
```@example main_run
df_events = @chain df_events begin
@rtransform(
:est_start = advancebdays(mkt_data.calendar, :ea, -120),
:est_end = advancebdays(mkt_data.calendar, :ea, -2),
:event_start = advancebdays(mkt_data.calendar, :ea, -1),
:event_end = advancebdays(mkt_data.calendar, :ea, 1),
)
@rtransform(:reg = quick_reg(mkt_data[:permno, :est_start .. :est_end], @formula(ret ~ mkt + smb + hml)))
@rtransform(
:bhar_reg = bhar(mkt_data[:permno, :event_start .. :event_end], :reg),
:bhar_simple = bhar(mkt_data[:permno, :event_start .. :event_end, ["ret", "mkt"]]),
:std = std(:reg),
:total_ret = bh_return(mkt_data[:permno, :event_start .. :event_end, ["ret"]]),
)
select(Not([:est_start, :est_end, :event_start, :event_end, :reg]))
end
show(df_events) # hide
```
## Vectorizing the Data
While the above works and is reasonably fast (a test of 1 million regressions takes about 26 seconds on a Ryzen 7 5700X), faster is better.
In particular, a significant reason the above method is slow is that the formula is parsed on each iteration. If the formula is the same for all of the cases, it is better to parse it only once. Therefore, it is optimal to do as much as possible using vectors.
To make this possible, this package provides a type `IterateFixedTable` which will return a `FixedTable` based on a supplied set of ids, dates and columns (or formula as above):
```@repl main_run
est_starts = advancebdays.(mkt_data.calendar, df_events.ea, -120)
est_ends = advancebdays.(mkt_data.calendar, df_events.ea, -2)
vec_data = mkt_data[df_events.permno, est_starts .. est_ends, [:ret, :mkt, :smb]]
```
Each element of `vec_data` is then easily accessible by an integer or can be looped over in a for loop:
```@example main_run
# for x in vec_data
# x
# end
# or
vec_data[10]
```
This object can be passed to the above functions just like a firm-level table; the function will iterate through the data and return a vector of results, as in the sketch below.
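For example (a sketch, assuming `quick_reg` maps over the iterator just as in the `@transform` block below; recall that `vec_data` only carries the `ret`, `mkt`, and `smb` columns):
```julia
regs = quick_reg(vec_data, @formula(ret ~ mkt + smb))  # one result per firm-event
```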
However, the above is rather ugly. A more practical way to use this is to continue using the `@chain` macro:
```@example main_run
df_events = @chain df_events begin
@rtransform(
:est_start = advancebdays(mkt_data.calendar, :ea, -120),
:est_end = advancebdays(mkt_data.calendar, :ea, -2),
:event_start = advancebdays(mkt_data.calendar, :ea, -1),
:event_end = advancebdays(mkt_data.calendar, :ea, 1),
)
@transform(:reg = quick_reg(mkt_data[:permno, :est_start .. :est_end], @formula(ret ~ mkt + smb + hml)))
@transform(
:bhar_reg = bhar(mkt_data[:permno, :event_start .. :event_end], :reg),
:bhar_simple = bhar(mkt_data[:permno, :event_start .. :event_end, ["ret", "mkt"]]),
)
@transform(
:std = std.(:reg),
:total_ret = bh_return(mkt_data[:permno, :event_start .. :event_end, ["ret"]]),
)
select(Not([:est_start, :est_end, :event_start, :event_end, :reg]))
end
show(df_events) # hide
```
Notice that the only difference between these two `@chain` macros is that this one uses `@transform` instead of `@rtransform`. This sends the entire column vector to the function and allows for much faster overall results: those same 1 million regressions now take just 0.44 seconds on the same computer.
| AbnormalReturns | https://github.com/junder873/AbnormalReturns.jl.git |
|
[
"MIT"
] | 0.3.0 | bd11a5492a2b50db534baec5826029e5f80a4445 | docs | 2469 |
# AbnormalReturns Documentation
This package provides functionality for getting slices of firm and market return data and running firm-specific regressions commonly used to calculate abnormal stock returns (actual stock return minus a benchmark). These are common in event studies in finance and economics and often require running a large number of regressions on different slices of firm and market data.
Most of the documentation is currently in the [example](@ref Example).
## Motivation
When estimating abnormal returns, it is common to estimate how the firm's return typically responds during an estimation window and use those predicted results in an event window:

The exact lengths of the estimation and event windows vary, but they are typically about 150 and 3-5 days, respectively. The estimation is typically a linear regression of the firm-specific return on market-wide factors.
### The Problem
Estimating abnormal returns requires getting two separate slices of data (for the estimation window and event window) for each firm-event. This is relatively trivial for small datasets, but abnormal returns are often calculated for a large number of events. For example, there are over 600,000 firm earnings announcements since 1990.
Generally, creating the dataset is done through a range join (e.g., gather all firm data between the start and end of the estimation window), which is often time consuming and/or creates huge datasets.
### This Package
This package uses a custom data structure to avoid repeating the data. The data structure is built on [BusinessDays.jl](https://github.com/JuliaFinance/BusinessDays.jl), making it easy to get a slice of data between two dates. It also implements threaded solutions to make the regressions and aggregation as fast as possible.
In a benchmark on 1 million firm events, it runs all the regressions in under 3 seconds. In a larger benchmark with two different models (so 2 million regressions) and calculating abnormal returns for the events, along with other basic statistics, it takes less than 9 seconds on a Ryzen 5 3600.
## Acknowledgements
This package would not be possible without [BusinessDays.jl](https://github.com/JuliaFinance/BusinessDays.jl), which is used for all of the date operations in this package and [StatsModels.jl](https://github.com/JuliaStats/StatsModels.jl), which provides an incredible `@formula` macro and the functionality that comes with that. | AbnormalReturns | https://github.com/junder873/AbnormalReturns.jl.git |
|
[
"MIT"
] | 0.3.1 | dfc5d2c38b4cf146901444165178e9041812fd69 | code | 4077 | module StrFs
export StrF, @strf_str
using StaticArrays: SVector
using UnPack: @unpack
import Base: sizeof, read, write, isless, cmp, ==, typemin, repeat, promote_rule, show,
codeunit, hash, length
"""
StrF{S}(::String)
A string of less than `S` bytes, represented using 0-terminated UTF-8.
# Internal representation
`bytes` are stored as an `SVector{S, UInt8}`, where `S` is the maximum number of bytes. They
are not validated to be conforming UTF-8. A terminating `0x00` follows **if and only if**
the string is shorter than `S` bytes.
This follows the `str#` ("sturfs") representation of [Stata](https://www.stata.com/help.cgi?dta).
# Operations
Comparisons, conversion, and a few operations are supported, but this is primarily meant
as a storage type. For complex manipulations, it is recommended that you convert to
`String`.
"""
struct StrF{S} <: AbstractString
bytes::SVector{S,UInt8}
end
macro strf_str(str)
StrF(str)
end
show(io::IO, str::StrF) = show(io, String(str))
# this implementation is a modified copy from base/hashing2.jl
function hash(str::StrF, h::UInt)
h += Base.memhash_seed
# note: use pointer(s) here (see #6058).
ccall(Base.memhash, UInt, (Ptr{UInt8}, Csize_t, UInt32),
Base.cconvert(Ptr{UInt8}, str.bytes), sizeof(str), h % UInt32) + h
end
promote_rule(::Type{String}, ::Type{StrF{S}}) where S = String
promote_rule(::Type{StrF{A}}, ::Type{StrF{B}}) where {A,B} = StrF{max(A,B)}
codeunit(::StrF{S}) where S = UInt8
function sizeof(str::StrF{S}) where S
nul = findfirst(isequal(0x00), str.bytes)
if nul ≡ nothing
S
else
nul - 1
end
end
read(io::IO, ::Type{StrF{S}}) where S = StrF{S}(read(io, SVector{S,UInt8}))
write(io::IO, str::StrF) = write(io, str.bytes)
String(str::StrF{S}) where S = String(str.bytes[1:sizeof(str)])
function StrF{S}(str::AbstractString) where S
@assert codeunit(str) ≡ UInt8
len = sizeof(str)
len ≤ S || throw(InexactError(:StrF, StrF{S}, str))
bytes = Vector{UInt8}(undef, S)
copyto!(bytes, codeunits(str))
if len < S
bytes[len + 1] = UInt8(0)
end
StrF{S}(SVector{S,UInt8}(bytes))
end
function StrF{A}(str::StrF{B}) where {A,B}
if A == B
str
elseif A > B
StrF{A}(vcat(str.bytes, zeros(SVector{A - B,UInt8})))
elseif sizeof(str) ≤ A
StrF{A}(str.bytes[1:A])
else
throw(InexactError(:StrF, StrF{A}, str))
end
end
function StrF(str::AbstractString)
@assert codeunit(str) ≡ UInt8
S = sizeof(str)
StrF{S}(SVector{S,UInt8}(codeunits(str)))
end
function cmp(a::StrF{A}, b::StrF{B}) where {A, B}
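    # compare raw bytes lexicographically; a 0x00 shared by both strings means
    # both ended here, so they compare equal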
for (achar, bchar) in zip(a.bytes, b.bytes)
(achar == bchar == 0x0) && return 0
c = cmp(achar, bchar)
c ≠ 0 && return c
end
A == B && return 0
(A < B && b.bytes[A+1] ≠ 0) && return -1
(A > B && a.bytes[B+1] ≠ 0) && return 1
0
end
cmp(a::StrF, b::AbstractString) = cmp(a, StrF(b)) # TODO improve
cmp(a::AbstractString, b::StrF) = -cmp(b, a)
isless(a::StrF, b::StrF) = cmp(a, b) < 0
(==)(a::StrF, b::StrF) = cmp(a, b) == 0
typemin(::StrF{S}) where S = StrF(zeros(SVector{S,UInt8}))
length(str::StrF) = length(String(str)) # TODO improve
function repeat(str::StrF{S}, ::Val{n}) where {S, n}
    @unpack bytes = str
    s = sizeof(str)
    nS = n * S
    # zero-filled buffer: the result is automatically 0-terminated whenever the
    # content (n * s bytes) is shorter than the capacity nS
    v = zeros(UInt8, nS)
    offset = 1
    for _ in 1:n
        copyto!(v, offset, bytes, 1, s)
        offset += s
    end
    StrF{nS}(SVector{nS}(v))
end
function Base.iterate(str::StrF, state = (String(str), ))
# NOTE: iteration implemented by converting to a string, and using it as the first
# element of the state. The second element is the state for the iterator of the latter.
y = iterate(state...)
y ≡ nothing && return y
first(y), (first(state), last(y))
end
Base.IteratorSize(::Type{<:StrF}) = Base.IteratorSize(String)
Base.IteratorEltype(::Type{<:StrF}) = Base.IteratorEltype(String)
Base.eltype(::Type{<:StrF}) = Base.eltype(String)
end # module
| StrFs | https://github.com/tpapp/StrFs.jl.git |
|
[
"MIT"
] | 0.3.1 | dfc5d2c38b4cf146901444165178e9041812fd69 | code | 2821 | using StrFs, Test
randstr(pieces::AbstractVector, K) = string((rand(pieces) for _ in 1:K)...)
randstr(pieces::String, K) = randstr(collect(pieces), K)
@testset "literal and show" begin
@test strf"λ the ultimate" ≡ StrF("λ the ultimate")
str = strf"λ the ultimate"
@test repr(str) == "\"λ the ultimate\""
end
@testset "conversions" begin
for _ in 1:100
str = randstr("aα∃", rand(3:5))
len = sizeof(str)
for _ in 1:100
# to string and back
S = len + rand(0:2)
strf = StrF{S}(str)
@test sizeof(strf) == len
@test length(strf) == length(str)
str2 = String(strf)
@test str2 == str
# to other strfs types
ssame = StrF{S}(strf)
slong = StrF{S + 1}(strf)
@test ssame ≡ strf
@test slong.bytes[1:(len+1)] == vcat(codeunits(str), [0x00])
@test_throws InexactError StrF{len-1}(strf)
# io
io = IOBuffer()
write(io, strf)
seekstart(io)
@test strf == read(io, StrF{len})
end
end
end
@testset "promotion" begin
p1 = [strf"a", strf"bb", "ccc"]
@test p1 isa Vector{String}
p2 = [strf"a", strf"bb"]
@test p2 isa Vector{StrF{2}}
end
"Convert to StrF, padding size with `Δs`."
function StrFmultilen(str, Δs)
S = sizeof(str)
Any[StrF{S + Δ}(str) for Δ in Δs]
end
@testset "concatenation, comparisons, and hashing" begin
for _ in 1:1000
str = randstr("abcηβπ", rand(2:8))
stra = str * "a"
strλ = str * "λ"
fstr = StrFmultilen(str, 0:2)
fstra = StrFmultilen(stra, 0:2)
fstrλ = StrFmultilen(strλ, 0:2)
@test all(str .== fstr)
        @test all(fstr .== permutedims(fstr))
@test all(str .< fstra)
@test all(fstr .< permutedims(fstra))
@test all(fstra .< permutedims(fstrλ))
@test all(hash(str) .== hash.(fstr))
end
end
@testset "shortening conversions" begin
str = StrF{6}("foo")
str3 = StrF{3}(str)
str4 = StrF{4}(str)
str5 = StrF{5}(str)
@test str == str3 == str4 == str5
@test_throws InexactError StrF{2}(str)
end
@testset "zero length strings" begin
s0 = StrF{0}("")
s = StrF("")
@test length(s) == length(s0) == 0
@test s == s0 == StrF{1}("")
end
@testset "iteration" begin
for s in ["abc", "ηβπ", "∇κ≠0 ↔ ∃ζ>0"]
@test collect(StrF(s)) == collect(s)
end
for _ in 1:1000
s = randstr("aα∃bc", rand(3:5))
@test collect(StrF(s)) == collect(s)
end
S = StrF{9}
@test eltype(S) ≡ eltype(String)
@test Base.IteratorSize(S) ≡ Base.IteratorSize(String)
    @test Base.IteratorEltype(S) ≡ Base.IteratorEltype(String)
end
| StrFs | https://github.com/tpapp/StrFs.jl.git |
|
[
"MIT"
] | 0.3.1 | dfc5d2c38b4cf146901444165178e9041812fd69 | docs | 1384 | # StrFs.jl

[](https://github.com/tpapp/StrFs.jl/actions?query=workflow%3ACI)
[](http://codecov.io/github/tpapp/StrFs.jl?branch=master)
Julia packages for strings with fixed maximum number of bytes.
## Overview
`StrF{S} <: AbstractString` can be used for strings up to `S` bytes in [UTF-8](https://en.wikipedia.org/wiki/UTF-8) encoding. When the string has less than that many bytes, it is terminated with a `0x00`.
This mirrors the way [Stata DTA files encode fixed length strings](https://www.stata.com/help.cgi?dta) (`str#`), but other applications may also find this useful. `StrF{S}` strings are implemented by wrapping an `SVector{S,UInt8}`, with the potential efficiency gains that entails.
## Examples
```julia
julia> using StrFs
julia> gender = [strf"male", strf"female"]
2-element Array{StrF{6},1}:
"male"
"female"
julia> gender[1] == "male"
true
julia> issorted(gender, rev = true)
true
julia> motto = StrF{6}("ηβπ") # uses all bytes
"ηβπ"
julia> sizeof(motto)
6
julia> length(motto)
3
julia> motto == StrF{10}("ηβπ") # 0x00 at byte 7
true
```
## Related
See [StataDTAFiles.jl](https://github.com/tpapp/StataDTAFiles.jl).
| StrFs | https://github.com/tpapp/StrFs.jl.git |
|
[
"MIT"
] | 0.2.1 | 50bb6c84118d1454850251c80ec23e86b8d9a693 | code | 296 | using MultiAgentSysAdmin
using Documenter
makedocs(;
sitename="MultiAgentSysAdmin.jl",
authors="rejuvyesh <[email protected]> and contributors",
modules=[MultiAgentSysAdmin],
format=Documenter.HTML()
)
deploydocs(;
repo="github.com/JuliaPOMDP/MultiAgentSysAdmin.jl.git",
)
| MultiAgentSysAdmin | https://github.com/JuliaPOMDP/MultiAgentSysAdmin.jl.git |
|
[
"MIT"
] | 0.2.1 | 50bb6c84118d1454850251c80ec23e86b8d9a693 | code | 9943 | module MultiAgentSysAdmin
using StaticArrays
using Graphs
using POMDPs
using Random
using MultiAgentPOMDPs
using POMDPTools: Deterministic
const MAPOMDPs = MultiAgentPOMDPs
"""
- `status`: {good, faulty, dead}
- `load`: {idle, loaded, success}
"""
const MachineState = SVector{2, Int}
const MachineAction = @SArray [0, 1] # noop, reboot
"""
- `p_fail_base`:
- `p_fail_bonus`:
- `p_dead_base`:
- `p_dead_bonus`:
- `p_load`: probability of getting a job when idle.
- `p_doneG`: probability of completing a job when good.
- `p_doneF`: probability of completing a job when faulty.
`p_fail_bonus` and `p_dead_bonus` are additional bonuses counted
when all neighbors are faulty. Counted per agent.
If a machine with 2 neighbors has a single faulty neighbor, it will get
an additional failing probability of `p_fail_bonus/2`. If the same machine
has one faulty neighbor and one dead neighbor, it will get a penalty of
`p_fail_bonus/2 + p_dead_bonus/2`.
"""
abstract type AbstractSysAdmin{Bool} <: JointMDP{Vector{MachineState}, Vector{Int}} end
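# The boolean type parameter selects whether `POMDPs.gen` returns a single global
# (summed) reward (`true`) or a vector of per-agent rewards (`false`); see the
# `gen` methods at the bottom of this file.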
Base.@kwdef struct UniSysAdmin{T} <: AbstractSysAdmin{T}
nagents::Int = 4
# status
p_fail_base::Float64 = 0.4
p_fail_bonus::Float64 = 0.2
p_dead_base::Float64 = 0.1
p_dead_bonus::Float64 = 0.5
# load
p_load::Float64 = 0.6
p_doneG::Float64 = 0.9
p_doneF::Float64 = 0.6
discount::Float64 = 0.9
reboot_penalty = -0.7
end
Base.@kwdef struct BiSysAdmin{T} <: AbstractSysAdmin{T}
nagents::Int = 4
# status
p_fail_base::Float64 = 0.4
p_fail_bonus::Float64 = 0.2
p_dead_base::Float64 = 0.1
p_dead_bonus::Float64 = 0.5
# load
p_load::Float64 = 0.6
p_doneG::Float64 = 0.9
p_doneF::Float64 = 0.6
discount::Float64 = 0.9
reboot_penalty = -0.0
end
Base.@kwdef struct RingofRingSysAdmin{T} <: AbstractSysAdmin{T}
nrings::Int = 3
nagents_per_ring::Int = 3
# status
p_fail_base::Float64 = 0.4
p_fail_bonus::Float64 = 0.2
p_dead_base::Float64 = 0.1
p_dead_bonus::Float64 = 0.5
# load
p_load::Float64 = 0.6
p_doneG::Float64 = 0.9
p_doneF::Float64 = 0.6
discount::Float64 = 0.9
reboot_penalty = -0.0
end
Base.@kwdef struct StarSysAdmin{T} <: AbstractSysAdmin{T}
nagents::Int = 4
# status
p_fail_base::Float64 = 0.4
p_fail_bonus::Float64 = 0.2
p_dead_base::Float64 = 0.1
p_dead_bonus::Float64 = 0.5
# load
p_load::Float64 = 0.6
p_doneG::Float64 = 0.9
p_doneF::Float64 = 0.6
discount::Float64 = 0.9
reboot_penalty = -0.0
end
Base.@kwdef struct RandomSysAdmin{T} <: AbstractSysAdmin{T}
nagents::Int = 4
nedges::Int = 5
seed::Int = 1
# status
p_fail_base::Float64 = 0.4
p_fail_bonus::Float64 = 0.2
p_dead_base::Float64 = 0.1
p_dead_bonus::Float64 = 0.5
# load
p_load::Float64 = 0.6
p_doneG::Float64 = 0.9
p_doneF::Float64 = 0.6
discount::Float64 = 0.9
reboot_penalty = -0.0
end
POMDPs.discount(p::AbstractSysAdmin) = p.discount
#POMDPs.isterminal(p::AbstractSysAdmin, s) = all(x->x[2] == 3) # XXX
POMDPs.isterminal(p::AbstractSysAdmin, s) = false
POMDPs.actionindex(p::AbstractSysAdmin, a, c) = findfirst(isequal(a), MachineAction)
MAPOMDPs.n_agents(p::AbstractSysAdmin) = p.nagents
MAPOMDPs.n_agents(p::RingofRingSysAdmin) = p.nagents_per_ring * p.nrings
MAPOMDPs.agent_actions(p::AbstractSysAdmin, idx::Int64, s::MachineState) = MachineAction
MAPOMDPs.agent_actions(p::AbstractSysAdmin, idx::Int64) = MachineAction
MAPOMDPs.agent_actionindex(p::AbstractSysAdmin, idx::Int64, a) = findfirst(isequal(a), MAPOMDPs.agent_actions(p, idx))
POMDPs.actions(p::AbstractSysAdmin) = vec(map(collect, Iterators.product((MAPOMDPs.agent_actions(p, i) for i in 1:MAPOMDPs.n_agents(p))...)))
POMDPs.actionindex(p::AbstractSysAdmin, a) = findfirst(isequal(a), POMDPs.actions(p))
function coord_graph_adj_mat(p::UniSysAdmin)
mat = zeros(Int64, p.nagents, p.nagents)
for i in 1:p.nagents-1
mat[i, i+1] = 1
end
mat[p.nagents, 1] = 1
return mat
end
function coord_graph_adj_mat(p::BiSysAdmin)
mat = zeros(Int64, p.nagents, p.nagents)
for i in 1:p.nagents-1
mat[i, i+1] = 1
end
mat[p.nagents, 1] = 1
mat = mat + mat'
return mat
end
function coord_graph_adj_mat(p::RingofRingSysAdmin)
na = MAPOMDPs.n_agents(p)
mat = zeros(Int64, na, na)
# Inner ring
    for idx in Iterators.product(1:p.nagents_per_ring:na, 1:p.nagents_per_ring:na)
if idx[1] == idx[2]
continue
end
mat[idx...] = 1
end
# rings on inner ring
    for i in 1:p.nagents_per_ring:na-1
for j in i:(i+p.nagents_per_ring-2)
mat[j, j+1] = 1
mat[j+1, j] = 1
end
mat[i, i+p.nagents_per_ring-1] = 1
mat[i+p.nagents_per_ring-1, i] = 1
end
return mat
end
function coord_graph_adj_mat(p::RandomSysAdmin)
return Matrix(adjacency_matrix(coordination_graph(p)))
end
function MAPOMDPs.coordination_graph(p::UniSysAdmin)
DiGraph(coord_graph_adj_mat(p))
end
function MAPOMDPs.coordination_graph(p::BiSysAdmin)
SimpleGraph(coord_graph_adj_mat(p))
end
function MAPOMDPs.coordination_graph(p::RandomSysAdmin)
SimpleGraph{Int}(p.nagents, p.nedges, seed=p.seed)
end
function MAPOMDPs.coordination_graph(p::RingofRingSysAdmin)
SimpleGraph(coord_graph_adj_mat(p))
end
function coord_graph_adj_mat(p::StarSysAdmin)
mat = zeros(Int64, p.nagents, p.nagents)
for i in 2:p.nagents
mat[1, i] = 1
mat[i, 1] = 1
end
return mat
end
function MAPOMDPs.coordination_graph(p::StarSysAdmin)
SimpleGraph(coord_graph_adj_mat(p))
end
# status: good, fail, dead
# load: idle, work, done
MAPOMDPs.agent_states(p::AbstractSysAdmin, idx::Int64) = vec(MachineState[MachineState(status,load) for status in 1:3, load in 1:3])
MAPOMDPs.agent_stateindex(p::AbstractSysAdmin, idx::Int64, s) = findfirst(isequal(s), MAPOMDPs.agent_states(p, idx))
POMDPs.states(p::AbstractSysAdmin) = vec(map(collect, Iterators.product((MAPOMDPs.agent_states(p, i) for i in 1:MAPOMDPs.n_agents(p))...)))
POMDPs.stateindex(p::AbstractSysAdmin, s) = findfirst(isequal(s), POMDPs.states(p))
function POMDPs.initialstate(p::AbstractSysAdmin)
return Deterministic(MachineState[MachineState(1, 1) for _ in 1:MAPOMDPs.n_agents(p)])
end
function sysadmin_loop(p, s, a, rng)
coordgraph = MAPOMDPs.coordination_graph(p) #SimpleGraph(coord_graph_adj_mat(p))
sp_vec = Vector{MachineState}(undef, MAPOMDPs.n_agents(p))
r_vec = Vector{Float64}(undef, MAPOMDPs.n_agents(p))
for aidx in 1:MAPOMDPs.n_agents(p)
rew = 0.0
bonus = 0.0
neighs = neighbors(coordgraph, aidx)
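        # failure/death bonuses are averaged over the neighborhood, so each faulty
        # (dead) neighbor contributes p_fail_bonus/|N| (p_dead_bonus/|N|), as
        # described in the docstring above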
for neigh in neighs
status = s[neigh][1]
if status == 2 # neighbor Fail
bonus += p.p_fail_bonus
elseif status == 3 # neighbor dead
bonus += p.p_dead_bonus
end
end
bonus /= length(neighs)
p_fail = p.p_fail_base + bonus
p_dead = p.p_dead_base + bonus
# Rewards only if noop
if a[aidx] == 0 # noop
status = s[aidx][1]
if status == 1 # Good
if rand(rng) < p_fail
newstatus = 2
else
newstatus = 1
end
elseif status == 2
if rand(rng) < p_dead
newstatus = 3
else
newstatus = 2
end
elseif status == 3
newstatus = 3
end
            load = s[aidx][2] # load is the second component of the machine state
if load == 1 # idle
if newstatus == 1
if rand(rng) < p.p_load
newload = 2
else
newload = 1
end
elseif newstatus == 2
if rand(rng) < p.p_load
newload = 2
else
newload = 1
end
elseif newstatus == 3
newload = 1
end
elseif load == 2 # work
if newstatus == 1
if rand(rng) < p.p_doneG
newload = 3
rew = 1.0 # finish reward
else
newload = 2
end
elseif newstatus == 2
if rand(rng) < p.p_doneF
newload = 3
rew = 1.0 # finish reward
else
newload = 2
end
elseif newstatus == 3 # dead
newload = 1
end
elseif load == 3 # done
newload = 3
end
else # reboot
newstatus = 1 # Good
newload = 1
rew += p.reboot_penalty
end
sp_vec[aidx] = MachineState(newstatus, newload)
r_vec[aidx] = rew
end
return (sp=sp_vec, r=r_vec)
end
"""
Basically, the only way we can get reward is by:
- Starting from the Load state (since it's the only one that can complete);
- Doing action 0 (noop);
- And ending up in the Done state.
A dead machine increases the probability that its neighbors become faulty and die.
The system receives a reward of 1 if a process terminates successfully. If the status
is faulty, processes take longer to terminate. If the machine dies, the process is lost.
"""
function POMDPs.gen(p::AbstractSysAdmin{false}, s, a, rng)
return sysadmin_loop(p, s, a, rng)
end
function POMDPs.gen(p::AbstractSysAdmin{true}, s, a, rng)
sp_vec, r_vec = sysadmin_loop(p, s, a, rng)
return (sp=sp_vec, r=sum(r_vec))
end
export UniSysAdmin, BiSysAdmin, RingofRingSysAdmin, RandomSysAdmin
end
| MultiAgentSysAdmin | https://github.com/JuliaPOMDP/MultiAgentSysAdmin.jl.git |
|
[
"MIT"
] | 0.2.1 | 50bb6c84118d1454850251c80ec23e86b8d9a693 | code | 502 | using Random
using Test
using MultiAgentPOMDPs
using MultiAgentSysAdmin
using POMDPs
@testset "MultiAgentSysAdmin.jl" begin
# Write your tests here.
for sysfn in [UniSysAdmin, BiSysAdmin, RingofRingSysAdmin, RandomSysAdmin]
for g in (true, false)
sys = sysfn{g}()
@test sys isa sysfn
s = rand(initialstate(sys))
a = ones(Int, n_agents(sys))
@inferred @gen(:sp, :r)(sys, s, a, MersenneTwister(42))
end
end
end
| MultiAgentSysAdmin | https://github.com/JuliaPOMDP/MultiAgentSysAdmin.jl.git |
|
[
"MIT"
] | 0.2.1 | 50bb6c84118d1454850251c80ec23e86b8d9a693 | docs | 606 | # MultiAgentSysAdmin
[](https://github.com/JuliaPOMDP/MultiAgentSysAdmin.jl/actions/workflows/ci.yml)
[](http://codecov.io/github/JuliaPOMDP/MultiAgentSysAdmin.jl?branch=master)
[](https://juliapomdp.github.io/MultiAgentSysAdmin.jl/stable)
[](https://juliapomdp.github.io/MultiAgentSysAdmin.jl/dev)
| MultiAgentSysAdmin | https://github.com/JuliaPOMDP/MultiAgentSysAdmin.jl.git |
|
[
"MIT"
] | 0.2.1 | 50bb6c84118d1454850251c80ec23e86b8d9a693 | docs | 134 | ```@meta
CurrentModule = MultiAgentSysAdmin
```
# MultiAgentSysAdmin
```@index
```
```@autodocs
Modules = [MultiAgentSysAdmin]
```
| MultiAgentSysAdmin | https://github.com/JuliaPOMDP/MultiAgentSysAdmin.jl.git |
|
[
"MIT"
] | 0.4.0 | 715de5b4530e466006f4b64817a076209a6a135d | code | 35 | println("Building NestedTests...")
| NestedTests | https://github.com/tanaylab/NestedTests.jl.git |
|
[
"MIT"
] | 0.4.0 | 715de5b4530e466006f4b64817a076209a6a135d | code | 1278 | # Build the documentations locally into `docs` so they will appear in the github pages. This way, in github we have the
# head version documentation, while in the standard Julia packages documentation we have the documentation of the last
# published version.
using Documenter
using Logging
using LoggingExtras
seen_problems = false
detect_problems = EarlyFilteredLogger(global_logger()) do log_args
if log_args.level >= Logging.Warn
global seen_problems
seen_problems = true
end
return true
end
global_logger(detect_problems)
push!(LOAD_PATH, ".")
using NestedTests
using Pkg
PROJECT_TOML = Pkg.TOML.parsefile(joinpath(@__DIR__, "..", "Project.toml"))
VERSION = PROJECT_TOML["version"]
NAME = PROJECT_TOML["name"]
AUTHORS = PROJECT_TOML["authors"]
REPO = "https://github.com/tanaylab/$(NAME).jl"
makedocs(;
    authors = join(AUTHORS, ", "),
build = "../docs/v$(VERSION)",
source = "../src",
clean = true,
doctest = true,
modules = [NestedTests],
highlightsig = true,
sitename = "$(NAME).jl v$(VERSION)",
draft = false,
linkcheck = true,
format = Documenter.HTML(; repolink = "$(REPO)/blob/main{path}?plain=1#L{line}", prettyurls = false),
pages = ["index.md"],
)
if seen_problems
exit(1)
end
| NestedTests | https://github.com/tanaylab/NestedTests.jl.git |
|
[
"MIT"
] | 0.4.0 | 715de5b4530e466006f4b64817a076209a6a135d | code | 1304 | using Logging
using LoggingExtras
seen_problems = false
detect_problems = EarlyFilteredLogger(global_logger()) do log_args
if log_args.level >= Logging.Warn
global seen_problems
seen_problems = true
end
return true
end
global_logger(detect_problems)
using JuliaFormatter
format(
".";
indent = 4,
margin = 120,
always_for_in = true,
for_in_replacement = "in",
whitespace_typedefs = true,
whitespace_ops_in_indices = true,
remove_extra_newlines = true,
import_to_using = false,
pipe_to_function_call = false,
short_to_long_function_def = true,
long_to_short_function_def = false,
always_use_return = true,
whitespace_in_kwargs = true,
annotate_untyped_fields_with_any = true,
format_docstrings = true,
conditional_to_if = true,
normalize_line_endings = "unix",
trailing_comma = true,
trailing_zero = true,
join_lines_based_on_source = false,
indent_submodule = false,
separate_kwargs_with_semicolon = true,
surround_whereop_typeparameters = true,
overwrite = true,
format_markdown = true,
align_assignment = false,
align_struct_field = false,
align_conditional = false,
align_pair_arrow = false,
align_matrix = true,
)
if seen_problems
exit(1)
end
| NestedTests | https://github.com/tanaylab/NestedTests.jl.git |
|
[
"MIT"
] | 0.4.0 | 715de5b4530e466006f4b64817a076209a6a135d | code | 417 | using JET
using Logging
using LoggingExtras
seen_problems = false
detect_problems = EarlyFilteredLogger(global_logger()) do log_args
if log_args.level >= Logging.Warn
global seen_problems
seen_problems = true
end
return true
end
global_logger(detect_problems)
push!(LOAD_PATH, ".")
using JET
using NestedTests
println(report_package("NestedTests"))
if seen_problems
exit(1)
end
| NestedTests | https://github.com/tanaylab/NestedTests.jl.git |
|
[
"MIT"
] | 0.4.0 | 715de5b4530e466006f4b64817a076209a6a135d | code | 1112 | using Logging
using LoggingExtras
seen_problems = false
detect_problems = EarlyFilteredLogger(global_logger()) do log_args
if log_args.level >= Logging.Warn
global seen_problems
seen_problems = true
end
return true
end
global_logger(detect_problems)
using Coverage
# Process '*.cov' files
coverage = process_folder("src")
coverage = append!(coverage, process_folder("test"))
# Process '*.info' files
coverage = merge_coverage_counts(
coverage,
filter!(let prefixes = (joinpath(pwd(), "src", ""), joinpath(pwd(), "test", ""))
c -> any(p -> startswith(c.filename, p), prefixes)
end, LCOV.readfolder("test")),
)
# Get total coverage for all Julia files
covered_lines, total_lines = get_summary(coverage)
percent = round(Int8, 100 * covered_lines / total_lines)
if percent == 0 && covered_lines > 0
percent = "<1%"
elseif percent == 100 && covered_lines < total_lines
percent = ">99%"
else
percent = "$(percent)%"
end
println("Line coverage: $(percent) ($(covered_lines) covered out of $(total_lines) total lines)")
if seen_problems
exit(1)
end
| NestedTests | https://github.com/tanaylab/NestedTests.jl.git |
|
[
"MIT"
] | 0.4.0 | 715de5b4530e466006f4b64817a076209a6a135d | code | 509 | using Logging
using LoggingExtras
seen_problems = 0
detect_problems = EarlyFilteredLogger(global_logger()) do log_args
if log_args.level >= Logging.Warn
        global seen_problems
        seen_problems += 1
end
return true
end
global_logger(detect_problems)
using JET
using SnoopCompile
import Pkg
Pkg.activate(".")
tinf = @snoopi_deep Pkg.test(; coverage = true, test_args = Base.ARGS)
report_callees(inference_triggers(tinf))
if seen_problems > 1
exit(1)
end
| NestedTests | https://github.com/tanaylab/NestedTests.jl.git |
|
[
"MIT"
] | 0.4.0 | 715de5b4530e466006f4b64817a076209a6a135d | code | 1219 | # This file is copied into `docs` so that when the package is published its documentation will be built automatically,
# that is, pretend the package uses the usual "docs/make.jl" idiom. Normally this isn't used because we build the
# documentation locally and publish them in github pages. This way, in github we have the head version documentation,
# while in the standard Julia packages documentation we have the documentation of the last published version.
using Documenter
push!(LOAD_PATH, "..")
using NestedTests
using Pkg
for file in readdir("docs"; join = true)
if !endswith(file, "make.jl")
rm(file; force = true, recursive = true)
end
end
PROJECT_TOML = Pkg.TOML.parsefile("Project.toml")
NAME = PROJECT_TOML["name"]
AUTHORS = PROJECT_TOML["authors"]
makedocs(;
    authors = join(AUTHORS, ", "),
source = "../src",
clean = true,
doctest = true,
modules = [NestedTests],
highlightsig = true,
sitename = "$(NAME).jl",
draft = false,
strict = true,
linkcheck = true,
format = Documenter.HTML(; prettyurls = false),
pages = ["index.md"],
)
for file in readdir("docs/build"; join = true)
if endswith(file, ".cov")
rm(file)
end
end
| NestedTests | https://github.com/tanaylab/NestedTests.jl.git |
|
[
"MIT"
] | 0.4.0 | 715de5b4530e466006f4b64817a076209a6a135d | code | 5651 | """
Run tests in nested environments.
"""
module NestedTests
export nested_test
export test_name
export test_prefixes
export abort_on_first_failure
using Printf
using Test
ABORT_ON_FIRST_FAILURE::Bool = false
"""
abort_on_first_failure(abort::Bool)::Bool
Specify whether to abort the execution when encountering a test failure (by default, `false`). Returns the previous
setting.
"""
function abort_on_first_failure(abort::Bool)::Bool
global ABORT_ON_FIRST_FAILURE
previous = ABORT_ON_FIRST_FAILURE
return ABORT_ON_FIRST_FAILURE = abort
end
run_prefixes = Vector{Vector{SubString{String}}}()
full_name = ""
test_names = String[]
this_test = Int[]
next_test = Int[]
depth = 0
cases = 0
errors = 0
start_time_ns = UInt64(0)
struct DoneNestedTestException <: Exception end
"""
test_prefixes(prefixes::Vector{Union{String}})::Nothing
Specify prefixes for the tests to run. Only tests whose [`test_name`](@ref) matches any of these prefixes will be run.
If the vector is empty (the default), all the tests will be run.
"""
function test_prefixes(prefixes::Vector{String})::Nothing
global run_prefixes
run_prefixes = [split(prefix, "/") for prefix in prefixes]
return nothing
end
"""
test_name()::String
Return the full name of the current test, with `/` separating the nested test names.
"""
function test_name()::String
global full_name
return full_name
end
function matches_prefix(test_names::Vector{String}, prefix_names::Vector{SubString{String}})::Bool
for (test_name, prefix_name) in zip(test_names, prefix_names)
if test_name != prefix_name
return false
end
end
return true
end
function matches_prefixes(test_names::Vector{String})::Bool
global run_prefixes
if isempty(run_prefixes)
return true
end
for prefix in run_prefixes
if matches_prefix(test_names, prefix)
return true
end
end
return false
end
"""
nested_test(name::String) do ... end
Run tests in a nested environment. The test can use any of the variables defined in its parent test(s). Any changes made
to these variables will be isolated from other sibling nested tests in this level, but will be visible to descendant
nested tests.
"""
function nested_test(code::Function, name::AbstractString)::Nothing
if depth == 0
top_nested_test(code, name)
else
deep_nested_test(code, name)
end
return nothing
end
function top_nested_test(code::Function, name::AbstractString)::Nothing
global full_name
global test_names
global this_test
global next_test
global depth
global cases
global errors
@assert full_name == ""
@assert isempty(test_names)
@assert isempty(next_test)
@assert isempty(this_test)
@assert depth == 0
@assert cases == 0
@assert errors == 0
if !matches_prefixes([name])
return nothing
end
try
start_time_ns = time_ns()
push!(this_test, 0)
push!(next_test, 1)
depth = 1
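        # Re-run the whole test tree once per leaf test case; `next_test` records
        # which path to descend on the next pass.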
while next_test[1] < 2 && (!ABORT_ON_FIRST_FAILURE || errors == 0)
@debug "Look for next test..."
this_test[1] = 0
try
deep_nested_test(code, name)
catch exception
if !(exception isa DoneNestedTestException)
rethrow(exception) # untested
end
cases += 1
end
@assert length(this_test) == 1
end
@assert isempty(test_names)
if errors > 0
throw(Test.FallbackTestSetException("$(name)/... : $(errors) failed out of $(cases) test cases"))
end
elapsed_time_s = (time_ns() - start_time_ns) / 1e9
printstyled(
"$(name)/... : all $(cases) test cases passed in $(@sprintf("%.2f", elapsed_time_s)) seconds\n";
color = :green,
)
return nothing
finally
full_name = ""
empty!(test_names)
empty!(next_test)
empty!(this_test)
depth = 0
cases = 0
errors = 0
end
end
function deep_nested_test(code::Function, name::AbstractString)::Nothing
global full_name
global test_names
global this_test
global next_test
global depth
global errors
@assert length(this_test) == depth
this_test[depth] += 1
if length(next_test) < depth
push!(next_test, 1)
end
@assert length(next_test) >= depth
push!(test_names, name)
full_name = join(test_names, "/")
if this_test < next_test[1:depth]
@debug "Skip $(full_name)..."
pop!(test_names)
return nothing
end
if length(next_test) == depth
next_test = copy(next_test)
@info "Test $(full_name)..."
end
depth += 1
push!(this_test, 0)
is_done = false
try
if matches_prefixes(test_names)
code()
else
@debug "Filter $(full_name)..."
end
is_done = true
catch exception
if exception isa Test.FallbackTestSetException
is_done = true
global errors
errors += 1
else
rethrow(exception)
end
finally
        pop!(test_names)
        full_name = join(test_names, "/")
pop!(this_test)
depth -= 1
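        # bookkeeping: once this subtree is exhausted, advance `next_test` so the
        # next pass descends into the following sibling test case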
if is_done
if length(this_test) < length(next_test)
pop!(next_test)
end
if length(this_test) == length(next_test)
next_test[end] += 1
end
end
end
throw(DoneNestedTestException())
end
end # module
| NestedTests | https://github.com/tanaylab/NestedTests.jl.git |
|
[
"MIT"
] | 0.4.0 | 715de5b4530e466006f4b64817a076209a6a135d | code | 2883 | using Test
using NestedTests
lines = String[]
function all_tests()::Nothing
nested_test("top") do
top_v = 1
push!(lines, "---")
push!(lines, "$(test_name()) top_v: $(top_v)")
nested_test("mid_1") do
mid_v = 1
push!(lines, "$(test_name()) top_v: $(top_v)")
push!(lines, "$(test_name()) mid_v: $(mid_v)")
@test true == false
@assert false # untested
end
nested_test("mid_2") do
mid_v = 1
push!(lines, "$(test_name()) top_v: $(top_v)")
push!(lines, "$(test_name()) mid_v: $(mid_v)")
for deep in 1:2
nested_test("deep_$(deep)") do
deep_v = 1
push!(lines, "$(test_name()) top_v: $(top_v)")
push!(lines, "$(test_name()) mid_v: $(mid_v)")
push!(lines, "$(test_name()) deep_v: $(deep_v)")
top_v += 1
mid_v += 1
deep_v += 1
return nothing
end
end
return nothing
end
return nothing
end
return nothing
end
@test_throws "top/... : 1 failed out of 5 test cases" begin
println("Ignore the test failure:")
all_tests()
end
@test lines == [
"---",
"top top_v: 1",
"top/mid_1 top_v: 1",
"top/mid_1 mid_v: 1",
"---",
"top top_v: 1",
"top/mid_2 top_v: 1",
"top/mid_2 mid_v: 1",
"top/mid_2/deep_1 top_v: 1",
"top/mid_2/deep_1 mid_v: 1",
"top/mid_2/deep_1 deep_v: 1",
"---",
"top top_v: 1",
"top/mid_2 top_v: 1",
"top/mid_2 mid_v: 1",
"top/mid_2/deep_2 top_v: 1",
"top/mid_2/deep_2 mid_v: 1",
"top/mid_2/deep_2 deep_v: 1",
"---",
"top top_v: 1",
"top/mid_2 top_v: 1",
"top/mid_2 mid_v: 1",
"---",
"top top_v: 1",
]
empty!(lines)
@test_throws "top/... : 1 failed out of 1 test cases" begin
println("Ignore the test failure:")
abort_on_first_failure(true)
all_tests()
end
abort_on_first_failure(false)
@test lines == ["---", "top top_v: 1", "top/mid_1 top_v: 1", "top/mid_1 mid_v: 1"]
empty!(lines)
test_prefixes(["other"])
all_tests()
@test isempty(lines)
test_prefixes(["top/mid_2"])
println("\nExpect no test failures:")
all_tests()
@test lines == [
"---",
"top top_v: 1",
"---",
"top top_v: 1",
"top/mid_2 top_v: 1",
"top/mid_2 mid_v: 1",
"top/mid_2/deep_1 top_v: 1",
"top/mid_2/deep_1 mid_v: 1",
"top/mid_2/deep_1 deep_v: 1",
"---",
"top top_v: 1",
"top/mid_2 top_v: 1",
"top/mid_2 mid_v: 1",
"top/mid_2/deep_2 top_v: 1",
"top/mid_2/deep_2 mid_v: 1",
"top/mid_2/deep_2 deep_v: 1",
"---",
"top top_v: 1",
"top/mid_2 top_v: 1",
"top/mid_2 mid_v: 1",
"---",
"top top_v: 1",
]
| NestedTests | https://github.com/tanaylab/NestedTests.jl.git |
|
[
"MIT"
] | 0.4.0 | 715de5b4530e466006f4b64817a076209a6a135d | docs | 4189 | # NestedTests v0.4.0 - run tests in nested environments.
See the [v0.4.0 documentation](https://tanaylab.github.io/NestedTests.jl/v0.4.0) for details.
## Motivation
When creating a test suite, it is often the case that many tests share the same setup (and teardown) code. In
particular, it is often the case that the tests form a tree, where some setup is common to many tests, and additional
setup is common to a subset of the tests, and so on until reaching the leaf test cases of this tree.
This package is built around this concept. It runs all the leaf test cases, and for each one, it runs all the setup code
needed for it, from scratch. That is, all the tests cases are isolated from each other and can freely modify the
prepared test data.
The main function of this package is [`nested_test`](@ref), which introduces a new sub-test to run. In addition, you can
always get the full path name of the current test environment using [`test_name`](@ref). For example, consider the
following:
```julia
nested_test("top") do
db = create_temporary_database()
@assert test_name() == "top"
@test is_valid_database(db)
nested_test("simple operations") do
@assert test_name() == "top/simple operations"
fill_simple_operations_data(db)
@test simple_operations_work(db)
end
nested_test("complex operations") do
@assert test_name() == "top/complex operations"
fill_complex_operations_data(db)
@test complex_operations_work(db)
end
end
```
The framework will run both the simple operations tests and the complex operations tests. Nested test cases have full
access to the variables introduced in their parent, following Julia's nested variable scopes; this allows both leaf test
cases to access the `db` variable from the top level test case. However, the framework will re-run things so that each
of the leaf tests will get a fresh database, isolating the leaf tests from each other. If a parent test case fails for
any reason (including failed `@test` assertions), then its child tests are skipped.
If you call `abort_on_first_failure(true)`, then the first failed test will abort execution. You can also restrict
the set of test cases that will be executed by specifying a list of prefixes, e.g. `test_prefixes(["top"])` will
restrict the executed tests to only cases nested under `top`.
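For example (the prefix is illustrative):

```julia
abort_on_first_failure(true)     # stop the whole run at the first failing test case
test_prefixes(["top/simple"])    # only run test cases nested under "top/simple"
```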
NOTE: Don't try to wrap `nested_test` inside a `@testset` - it won't work since `@testset` takes over the failed `@test`
assertions making it difficult for the `nested_test` to tell when a test case failed. It would have been nice to
integrate both systems so that each nested test case would be a `@testset`, or at least the top-level nested test case
would be, but this requires deep integration with the implementation of `@testset`. If someone wants to take a stab at
this, pull requests are welcome :-) For now we print the total number of passed/failed test cases (not `@test`
assertions) at the end, together with the elapsed time.
## Installation
Just `Pkg.add("NestedTests")`, like installing any other Julia package.
## License (MIT)
Copyright © 2023 Weizmann Institute of Science
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| NestedTests | https://github.com/tanaylab/NestedTests.jl.git |
|
[
"MIT"
] | 0.4.0 | 715de5b4530e466006f4b64817a076209a6a135d | docs | 147 | # NestedTests
```@docs
NestedTests
NestedTests.nested_test
NestedTests.test_name
NestedTests.abort_on_first_failure
NestedTests.test_prefixes
```
| NestedTests | https://github.com/tanaylab/NestedTests.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | code | 721 | using BenchmarkTools
using FactorRotations
const SUITE = BenchmarkGroup()
SUITE["criterion_and_gradient!"] = BenchmarkGroup()
methods = [
Biquartimax(),
Biquartimin(),
CrawfordFerguson(kappa = 0.5),
Geomin(),
Infomax(),
MinimumEntropyRatio(),
MinimumEntropy(),
Oblimax(),
Oblimin(gamma = 0.5),
Quartimax(),
Simplimax(m = 5),
TandemCriterionI(),
TandemCriterionII(),
# TargetRotation(zeros(10, 3)), exclude due to printing issues
Varimax(),
]
for method in methods
SUITE["criterion_and_gradient!"][method] = @benchmarkable(
criterion_and_gradient!($(zeros(10, 3)), $method, $(rand(10, 3))),
evals = 10,
samples = 1000,
)
end
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | code | 1261 | using FactorRotations
using Documenter
using DocumenterCitations
bibliography =
CitationBibliography(joinpath(@__DIR__, "src", "references.bib"), style = :authoryear)
DocMeta.setdocmeta!(
FactorRotations,
:DocTestSetup,
:(using FactorRotations, Enzyme);
recursive = true,
)
makedocs(;
checkdocs = :exported,
modules = [FactorRotations],
authors = "Philipp Gewessler",
repo = "https://github.com/p-gw/FactorRotations.jl/blob/{commit}{path}#{line}",
sitename = "FactorRotations.jl",
format = Documenter.HTML(;
prettyurls = get(ENV, "CI", "false") == "true",
canonical = "https://p-gw.github.io/FactorRotations.jl",
edit_link = "main",
assets = String[],
),
pages = [
"Home" => "index.md",
"Guides" => [
"Rotate an existing loading matrix" => "guides/basic_usage.md",
"Implementing rotation methods" => "guides/implementing_rotation_methods.md",
"Working with MultivariateStats.jl" => "guides/multivariatestats.md",
],
"Rotation Methods" => "rotation_methods.md",
"API" => "api.md",
],
plugins = [bibliography],
)
deploydocs(; repo = "github.com/p-gw/FactorRotations.jl", devbranch = "main")
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | code | 315 | module EnzymeExt
using FactorRotations
using Enzyme: gradient!, Reverse
# wrapper for Enzyme.gradient!
FactorRotations.autodiff_gradient!(_::FactorRotations.AutodiffBackend{:Enzyme}, ∇Q::AbstractMatrix, method::RotationMethod, Λ::AbstractMatrix) =
gradient!(Reverse, ∇Q, Base.Fix1(criterion, method), Λ)
end
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | code | 1050 | module MultivariateStatsExt
using FactorRotations, MultivariateStats
"""
rotate(model, method; kwargs...)
Perform a rotation of the models loading matrix using rotation `method`, where model is a
fitted `MultivariateStats.FactorAnalysis`, `MultivariateStats.PCA`, or
`MultivariateStats.PPCA` model.
For a list of available keyword arguments see [`rotate`](@ref).
"""
function FactorRotations.rotate(model::Union{PCA,PPCA,FactorAnalysis}, method; kwargs...)
L = MultivariateStats.loadings(model)
return rotate(L, method; kwargs...)
end
"""
rotate!(model, method; kwargs...)
Perform an in-place rotation of the models loading matrix using rotation `method`, where model is a
fitted `MultivariateStats.FactorAnalysis`, `MultivariateStats.PCA`, or
`MultivariateStats.PPCA` model.
For a list of available keyword arguments see [`rotate`](@ref).
"""
function FactorRotations.rotate!(model::Union{PCA,PPCA,FactorAnalysis}, method; kwargs...)
L = MultivariateStats.loadings(model)
return rotate!(L, method; kwargs...)
end
end
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | code | 2300 | module FactorRotations
using Folds
using FillArrays
using LinearAlgebra
using LogExpFunctions
using Logging
using SimpleUnPack
using Statistics
import LinearAlgebra: rotate!, reflect!
export setverbosity!
export FactorRotation, loadings, rotation, factor_correlation
export rotate, rotate!
export criterion, criterion_and_gradient!
export reflect, reflect!
export kaiser_normalize, kaiser_denormalize
export kaiser_normalize!, kaiser_denormalize!
export RotationMethod
export Orthogonal, Oblique
export isorthogonal, isoblique
export rotation_type
export Biquartimax
export Biquartimin
export ComponentLoss, KatzRohlf, LinearRightConstant, Concave, Absolmin
export CrawfordFerguson
export Cubimax
export Equamax
export Geomin
export Parsimax
export PatternSimplicity
export Infomax
export MinimumEntropy
export MinimumEntropyRatio
export Oblimax
export Oblimin
export Quartimax
export Simplimax
export TandemCriterionI, TandemCriterionII, TandemCriteria
export TargetRotation
export Varimax
const VERBOSITY = Ref(false)
"""
setverbosity!(::Bool)
Sets the global verbosity level of the package.
If set to `false` (the default), package functions will not log `@info` statements.
If set to `true`, package functions will provide `@info` statements.
"""
function setverbosity!(verbose::Bool)
@info "$(@__MODULE__) logging is $(verbose ? "enabled" : "disabled") globally."
VERBOSITY[] = verbose
return nothing
end
struct AutodiffBackend{B}
AutodiffBackend(backend::Symbol) = new{backend}()
end
const AUTODIFF_BACKEND = Ref{AutodiffBackend}(AutodiffBackend(:Enzyme))
"""
set_autodiff_backend(backend::Symbol)
Sets the *automatic differentiation* backend.
Automatic differentiation is used by the fallback `criterion_and_gradient!()` implementation.
Currently, only `:Enzyme` backend is supported.
Note that to actually enable the differentiation,
the corresponding autodiff package must be loaded first
(e.g. `using Enzyme`)
"""
function set_autodiff_backend(backend::Symbol)
@info "$(@__MODULE__) autodiff backend set to $(backend)."
AUTODIFF_BACKEND[] = AutodiffBackend(backend)
return nothing
end
include("utils.jl")
include("normalize.jl")
include("rotation_types.jl")
include("methods/methods.jl")
include("rotate.jl")
include("reflect.jl")
end
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | code | 2603 | """
kaiser_normalize!(Λ)
Perform an in-place Kaiser normalization of loading matrix `Λ`.
Returns a tuple of a normalized loading matrix and associated weights.
## Examples
```jldoctest
$(DEFINITION_L)
julia> L_norm, weights = kaiser_normalize!(L);
julia> L_norm
8×2 Matrix{Float64}:
0.902539 -0.430609
0.867524 -0.497395
0.855641 -0.517569
0.89353 -0.449004
0.84375 0.536737
0.826331 0.563184
0.80097 0.598705
0.889144 0.457627
julia> weights
8-element Vector{Float64}:
0.9196281857359527
0.9429130394686458
0.9080908544853868
0.8930873417533137
0.9315556880831118
0.8132330539273475
0.7416009708731509
0.7276661322337326
julia> L_norm ≈ L
true
```
"""
function kaiser_normalize!(Λ::AbstractMatrix)
weights = communalities(Λ)
Λ ./= weights
return Λ, weights
end
"""
kaiser_normalize(Λ)
Perform a Kaiser normalization of loading matrix `Λ`.
Returns a tuple of a normalized loading matrix and associated weights.
## Examples
```jldoctest
$(DEFINITION_L)
julia> L_norm, weights = kaiser_normalize(L);
julia> L_norm
8×2 Matrix{Float64}:
0.902539 -0.430609
0.867524 -0.497395
0.855641 -0.517569
0.89353 -0.449004
0.84375 0.536737
0.826331 0.563184
0.80097 0.598705
0.889144 0.457627
julia> weights
8-element Vector{Float64}:
0.9196281857359527
0.9429130394686458
0.9080908544853868
0.8930873417533137
0.9315556880831118
0.8132330539273475
0.7416009708731509
0.7276661322337326
```
"""
kaiser_normalize(Λ) = kaiser_normalize!(copy(Λ))
"""
kaiser_denormalize!(Λ, weights)
Undo a Kaiser normalization of normalized `Λ` in-place given `weights`.
## Examples
```
$(DEFINITION_L)
julia> L_orig = copy(L);
julia> _, weights = kaiser_normalize!(L);
julia> kaiser_denormalize!(L, weights)
8×2 Matrix{Float64}:
0.83 -0.396
0.818 -0.469
0.777 -0.47
0.798 -0.401
0.786 0.5
0.672 0.458
0.594 0.444
0.647 0.333
julia> L ≈ L_orig
true
```
"""
function kaiser_denormalize!(Λ::AbstractMatrix, weights::AbstractVector)
Λ .*= weights
return Λ
end
"""
kaiser_denormalize(Λ, weights)
Undo a Kaiser normalization of normalized loading matrix `Λ` given `weights`.
## Examples
```jldoctest
$(DEFINITION_L)
julia> L_norm, weights = kaiser_normalize(L);
julia> L_denorm = kaiser_denormalize(L_norm, weights)
8×2 Matrix{Float64}:
0.83 -0.396
0.818 -0.469
0.777 -0.47
0.798 -0.401
0.786 0.5
0.672 0.458
0.594 0.444
0.647 0.333
julia> L ≈ L_denorm
true
```
"""
kaiser_denormalize(Λ, weights) = kaiser_denormalize!(copy(Λ), weights)
communalities(Λ) = norm.(eachrow(Λ))
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | code | 1482 | """
reflect!(r::FactorRotation)
Modify `r` in-place by swapping signs of the loading matrix `r.L` such that the sum of each
column is positive. The rotation matrix `r.T` and factor correlation matrix `r.phi` are
updated accordingly.
## Examples
```jldoctest
$(DEFINITION_L)
julia> r = rotate(L, Varimax());
julia> r_reflected = reflect!(r)
FactorRotation{Float64} with loading matrix:
8×2 Matrix{Float64}:
0.886061 0.246196
0.924934 0.183253
0.894664 0.155581
0.865205 0.221416
0.264636 0.893176
0.206218 0.786653
0.156572 0.724884
0.269424 0.67595
julia> r == r_reflected
true
```
"""
function reflect!(r::FactorRotation)
@unpack L, T, phi = r
v = reflect_cols(L)
L .*= v
T .*= v
phi .= T' * T
return r
end
"""
reflect(r::FactorRotation)
Return a new [`FactorRotation`](@ref) with a modified loading matrix such that the sum of
each column is positive. The rotation matrix and factor correlation matrix are updated
accordingly.
## Examples
```jldoctest
$(DEFINITION_L)
julia> r = rotate(L, Varimax());
julia> reflect(r)
FactorRotation{Float64} with loading matrix:
8×2 Matrix{Float64}:
0.886061 0.246196
0.924934 0.183253
0.894664 0.155581
0.865205 0.221416
0.264636 0.893176
0.206218 0.786653
0.156572 0.724884
0.269424 0.67595
```
"""
reflect(r::FactorRotation) = reflect!(deepcopy(r))
function reflect_cols(m::AbstractMatrix)
colsums = sum(m, dims = 1)
return @. ifelse(colsums < 0, -1, 1)
end
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | code | 12831 | """
FactorRotation{T <: Real}
A type holding results of a factor rotation.
## Fields
- `L`: The rotated factor loading matrix
- `T`: The factor rotation matrix
- `phi`: The factor correlation matrix
- `weights`: Normalization weights
"""
struct FactorRotation{T}
L::Matrix{T}
T::Matrix{T}
phi::Matrix{T}
weights::Vector{T}
end
function FactorRotation(L, T, weights)
return FactorRotation(L, T, T' * T, weights)
end
function Base.show(io::IO, r::FactorRotation)
println(io, "$(typeof(r)) with loading matrix:")
show(io, "text/plain", r.L)
return nothing
end
"""
loadings(r::FactorRotation)
Return the rotated factor loading matrix from `r`.
"""
loadings(r::FactorRotation) = r.L
"""
rotation(r::FactorRotation)
Return the factor rotation matrix from `r`.
"""
rotation(r::FactorRotation) = r.T
"""
factor_correlation(r::FactorRotation)
Return the factor correlation matrix from `r`.
"""
factor_correlation(r::FactorRotation) = r.phi
"""
rotate(Λ, method::RotationMethod; kwargs...)
Perform a rotation of the factor loading matrix `Λ` using a rotation `method`.
## Keyword arguments
- `alpha`: Sets the initial value for alpha (default: 1).
- `f_atol`: Sets the absolute tolerance for the comparison of minimum criterion values
  when using random starts (default: 1e-6).
- `g_atol`: Sets the absolute tolerance for convergence of the algorithm (default: 1e-6).
- `init`: A k-by-k matrix of starting values for the algorithm.
If `init = nothing` (the default), the identity matrix will be used as starting
values.
- `maxiter1`: Controls the maximum number of iterations in the outer loop of the algorithm
  (default: 1000).
- `maxiter2`: Controls the maximum number of iterations in the inner loop of the algorithm
  (default: 10).
- `normalize`: Perform Kaiser normalization before rotation of the loading matrix
(default: false).
- `randomstarts`: Determines if the algorithm should be started from random starting values.
If `randomstarts = false` (the default), the algorithm is calculated once
for the initial values provided by `init`.
If `randomstarts = true`, the algorithm is started 100 times from random
starting matrices.
If `randomstarts = x::Int`, the algorithm is started `x` times from random
starting matrices.
- `reflect`: Switch signs of the columns of the rotated loading matrix such that the sum of
loadings is non-negative for all columns (default: true)
- `use_threads`: Parallelize random starts using threads (default: false)
- `verbose`: Print logging statements (default: true)
- `logperiod`: How frequently to report the optimization state (default: 100).
## Return type
The `rotate` function returns a [`FactorRotation`](@ref) object.
If random starts were requested, then `rotate` returns the [`FactorRotation`](@ref) object
with the minimum criterion value.
## Examples
```jldoctest; filter = r"(\\d*)\\.(\\d{4})\\d+" => s"\\1.\\2"
$(DEFINITION_L)
julia> rotate(L, Varimax())
FactorRotation{Float64} with loading matrix:
8×2 Matrix{Float64}:
0.886061 0.246196
0.924934 0.183253
0.894664 0.155581
0.865205 0.221416
0.264636 0.893176
0.206218 0.786653
0.156572 0.724884
0.269424 0.67595
```
"""
function rotate(
Λ,
method;
verbose = VERBOSITY[],
randomstarts = false,
normalize = false,
reflect = true,
f_atol = 1e-6,
g_atol = 1e-6,
use_threads = false,
kwargs...,
)
loglevel = verbose ? Logging.Info : Logging.Debug
starts = parse_randomstarts(randomstarts)
# pre-processing
if normalize
@logmsg loglevel "Performing Kaiser normalization of loading matrix."
L, weights = kaiser_normalize(Λ)
else
L, weights = Λ, ones(eltype(Λ), size(Λ, 1))
end
# rotation
if starts == 0
rotation = _rotate(L, method; g_atol, loglevel, kwargs...)
else
if :init in keys(kwargs)
@warn "Requested random starts but keyword argument `init` was provided. Ignoring initial starting values in `init`."
end
f = use_threads ? Folds.map : map
states = f(1:starts) do _
init = random_orthogonal_matrix(size(L, 2))
return _rotate(L, method; g_atol, loglevel, kwargs..., init)
end
rotation = argmin(minimumQ, states)
Q_mins = minimumQ.(states)
Q_min = minimumQ(rotation)
        n_at_Q_min = sum(isapprox(Q, Q_min; atol = f_atol) for Q in Q_mins)
n_diverged = sum(is_diverged(s) for s in states)
@logmsg loglevel "Finished $(starts) rotations with random starts."
if n_diverged == starts
@warn "All $(starts) rotations did not converge. Please check the provided rotation method and/or loading matrix."
elseif n_diverged > 0
@warn "There were $(n_diverged) rotations that did not converge. Please check the provided rotation method and/or loading matrix."
else
@logmsg loglevel "There were 0 rotations that did not converge."
end
@logmsg loglevel "$(n_at_Q_min) rotations converged to the same minimum value, Q = $(Q_min)"
end
# post-processing
if normalize
@logmsg loglevel "Denormalizing rotated loading matrix."
kaiser_denormalize!(rotation.L, weights)
end
rot = FactorRotation(rotation.L, rotation.T, weights)
reflect && reflect!(rot)
return rot
end
function parse_randomstarts(x::Bool; default = 100)
starts = x ? default : 0
return starts
end
function parse_randomstarts(x::Int)
x > 0 && return x
msg = "Invalid value argument $(x) for `randomstarts`. Please provide an integer > 0 or set `randomstarts = true` to use the default."
throw(ArgumentError(msg))
end
function rotate(Λ, method::TandemCriteria; kwargs...)
rotation_1 = rotate(Λ, TandemCriterionI(); kwargs...)
reduced_loading_matrix = loadings(rotation_1)[:, 1:method.keep]
rotation_2 = rotate(reduced_loading_matrix, TandemCriterionII(); kwargs...)
return rotation_2
end
"""
rotate!(Λ, method::RotationMethod; kwargs...)
Perform a rotation of the factor loading matrix `Λ` and overwrite `Λ` with the rotated
loading matrix.
For a list of available keyword arguments see [`rotate`](@ref).
## Examples
```jldoctest; filter = r"(\\d*)\\.(\\d{4})\\d+" => s"\\1.\\2"
$(DEFINITION_L)
julia> rotate!(L, Quartimax())
8×2 Matrix{Float64}:
0.898755 0.194823
0.933943 0.129748
0.902132 0.103864
0.876508 0.171284
0.315572 0.876476
0.251124 0.773489
0.198008 0.714678
0.307858 0.659334
```
"""
function rotate!(Λ, method; kwargs...)
rot = rotate(Λ, method; kwargs...)
Λ .= loadings(rot)
return Λ
end
"""
IterationState
A struct that holds the state of an iteration of the rotation algorithm.
"""
struct IterationState{T<:Real,V<:Real}
alpha::T
maxiter::Int
Q::V
end
"""
RotationState
A struct that holds the state of the rotation algorithm.
## Fields
- `init`: The initial rotation matrix
- `A`: The initial factor loading matrix
- `T`: The current state of the rotation matrix
- `Ti`: The inverse of the rotation matrix (only used for oblique rotation; `nothing` otherwise)
- `L`: The current state of the rotated loading matrix
- `iterations`: A vector of [`IterationState`](@ref) that holds iteration states of the
optimization.
- `is_converged`: Indicates whether the optimization converged
"""
mutable struct RotationState{RT<:RotationType,T<:AbstractMatrix,S<:IterationState}
init::T
A::T
T::T
Ti::Union{Nothing,T}
L::T
iterations::Vector{S}
is_converged::Bool
end
function RotationState(::Type{T}, init, A) where {T<:RotationType}
if T <: Orthogonal
Ti = nothing
L = A * init
elseif T <: Oblique
Ti = inv(init)
L = A * Ti'
else
throw(ArgumentError("Unsupported rotation type $(T)"))
end
S = IterationState{eltype(A),eltype(A)}
return RotationState{T,typeof(A),S}(init, A, init, Ti, L, S[], false)
end
is_converged(state::RotationState) = state.is_converged
is_diverged(state::RotationState) = !is_converged(state)
function minimumQ(state::RotationState)
return length(state.iterations) > 0 ? last(state.iterations).Q : NaN
end
"""
_rotate(A::AbstractMatrix, method::RotationMethod; kwargs...)
Implements the gradient projection algorithm for orthogonal and oblique factor rotation
described in Bernaards & Jennrich (2005).
"""
function _rotate(
A::AbstractMatrix{TV},
method::RotationMethod{RT};
g_atol = 1e-6,
alpha = 1,
maxiter1 = 1000,
maxiter2 = 10,
init::Union{Nothing,AbstractMatrix} = nothing,
logperiod::Integer = 100,
loglevel,
) where {RT,TV<:Real}
@logmsg loglevel "Initializing rotation using algorithm $(typeof(method))."
state = initialize(RT, init, A; loglevel)
∇Q = similar(state.L)
Q = criterion_and_gradient!(∇Q, method, state.L)
@logmsg loglevel "Initial criterion value = $(Q)"
# preallocate variables to avoid unnecessary allocations
ft = Q
G = gradient_f!(similar(state.T), state, ∇Q)
X = similar(state.T)
Tt = similar(state.T)
Gp = similar(state.T)
s = zero(eltype(G))
α = Float64(alpha) # for type stability
@logmsg loglevel "Starting optimization..."
for i in 1:maxiter1
project_G!(Gp, state, G)
s = norm(Gp)
is_converged(s, g_atol) && break
α *= 2
for _ in 1:maxiter2
copy!(X, state.T)
axpy!(-α, Gp, X)
project_X!(Tt, state, X)
update_state!(state, Tt)
Q = criterion_and_gradient!(∇Q, method, state.L)
if (Q < ft - 0.5 * s^2 * α)
# update state.T (and reuse the old one for the next iteration)
Tt, state.T = state.T, Tt
ft = Q
gradient_f!(G, state, ∇Q)
break
else
α /= 2
end
end
if (i == 1 || i == maxiter1 || mod(i, logperiod) == 0)
@logmsg loglevel "Current optimization state:" iteration = i criterion = Q alpha = α
end
iteration_state = IterationState(α, maxiter2, Q)
push!(state.iterations, iteration_state)
end
if !is_converged(s, g_atol)
@warn "Algorithm did not converge after $(maxiter1) iterations (|∇G|=$(s) > $(g_atol))"
else
state.is_converged = true
@logmsg loglevel "Rotation algorithm converged after $(length(state.iterations)) iterations."
@logmsg loglevel "Final criterion value = $(ft)"
end
return state
end
"""
initialize(RT::Type{<:RotationType}, init, A; loglevel)
Initialize a [`RotationState`](@ref) of rotation type `RT` with initial values `init` and
original loading matrix `A`. If `init = nothing`, the identity matrix is used as the
starting value.
"""
function initialize(
::Type{RT},
init,
A::AbstractMatrix{TV};
loglevel,
) where {RT<:RotationType,TV}
_, k = size(A)
if isnothing(init)
@logmsg loglevel "No initial values provided. Using identity matrix as starting value."
T = Matrix{TV}(I, k, k)
else
T = init
end
if size(T) != (k, k)
throw(ArgumentError("matrix of starting values must be of size ($k, $k)"))
end
return RotationState(RT, T, A)
end
function gradient_f!(G::AbstractMatrix, state::RotationState{Orthogonal}, ∇Q)
@unpack A = state
mul!(G, A', ∇Q)
return G
end
function gradient_f!(G::AbstractMatrix, state::RotationState{Oblique}, ∇Q)
@unpack L, Ti = state
mul!(G, Ti', ∇Q' * L, -1, 0)
return G
end
"""
project_G!(Gp, state::RotationState, G)
Compute the projection of the gradient `G` given the current rotation state and store the
result in `Gp`.
"""
function project_G!(Gp, state::RotationState{Orthogonal}, G)
@unpack T = state
M = T' * G
S = M + M'
mul!(Gp, T, S, -0.5, 0)
axpy!(1, G, Gp)
return Gp
end
function project_G!(Gp, state::RotationState{Oblique}, G)
@unpack T = state
TG = T .* G
mul!(Gp, T, diagm(vec(sum(TG, dims = 1))), -1, 0)
axpy!(1, G, Gp)
return Gp
end
"""
project_X!(Tt, state::RotationState, X)
Compute the projection `Tt` of `X` back onto the set of admissible rotation matrices.
"""
function project_X!(Tt, state::RotationState{Orthogonal}, X)
@unpack U, Vt = svd(X)
mul!(Tt, U, Vt)
return Tt
end
function project_X!(Tt, state::RotationState{Oblique}, X)
v = inv.(sqrt.(sum(abs2, X, dims = 1)))
mul!(Tt, X, diagm(vec(v)))
return Tt
end
"""
update_state!(state::RotationState, Tt)
Update the rotated loading matrix in `state` given a new rotation matrix `Tt`.
"""
function update_state!(state::RotationState{Orthogonal}, Tt)
return mul!(state.L, state.A, Tt)
end
function update_state!(state::RotationState{Oblique}, Tt)
state.Ti = inv(Tt)
return mul!(state.L, state.A, state.Ti')
end
"""
is_converged(s, g_atol)
Determine whether the algorithm has converged, i.e. whether the projected gradient norm
`s` is below the absolute tolerance `g_atol`.
"""
is_converged(s, g_atol) = s < g_atol
"""
RotationType
An abstract type representing a type of factor rotation.
"""
abstract type RotationType end
"""
Orthogonal
A type representing an orthogonal rotation type.
"""
struct Orthogonal <: RotationType end
"""
Oblique
A type representing an oblique rotation type.
"""
struct Oblique <: RotationType end
const DEFINITION_L = """
julia> L = [
0.830 -0.396
0.818 -0.469
0.777 -0.470
0.798 -0.401
0.786 0.500
0.672 0.458
0.594 0.444
0.647 0.333
];
"""
function zerodiag!(x)
for i in diagind(x)
x[i] = 0.0
end
return x
end
mxlogx(x) = -xlogx(x)
"""
nthsmallest(m::AbstractArray, n)
Get the nth smallest element of `m`.
"""
nthsmallest(m::AbstractArray, n) = sort(vec(m))[n]
"""
random_orthogonal_matrix(n::Int)
Return a random orthogonal square matrix of size (n, n).
"""
function random_orthogonal_matrix(n::Int)
Q, R = qr(randn(n, n))
O = Q * Diagonal(sign.(diag(R)))
return O
end
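# A quick sanity check (a sketch, not part of the API): the result is orthogonal
# up to floating-point error.
#   O = random_orthogonal_matrix(3)
#   O' * O ≈ I  # true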
"""
centercols!(m::AbstractMatrix)
Efficiently subtract the column mean from each column of `m`.
"""
function centercols!(m::AbstractMatrix)
for col in eachcol(m)
col .-= mean(col)
end
return m
end
"""
Biquartimax()
The Biquartimax rotation method.
## Details
The Biquartimax rotation method is a special case of the [`Oblimin`](@ref) rotation with
parameters `gamma = 0.5` and `orthogonal = true`.
## Examples
```jldoctest; filter = r"(\\d*)\\.(\\d{4})\\d+" => s"\\1.\\2"
$(DEFINITION_L)
julia> L_biquartimax = rotate(L, Biquartimax());
julia> L_oblimin = rotate(L, Oblimin(gamma = 0.5, orthogonal = true));
julia> loadings(L_biquartimax) ≈ loadings(L_oblimin)
true
```
"""
Biquartimax() = Oblimin(gamma = 0.5, orthogonal = true)
"""
Biquartimin
The Biquartimin rotation criterion.
## Keyword arguments
- `orthogonal`: If `orthogonal = true` an orthogonal rotation is performed, an
oblique rotation otherwise. (default: `false`)
"""
struct Biquartimin{RT} <: RotationMethod{RT}
function Biquartimin(; orthogonal = false)
T = orthogonal ? Orthogonal : Oblique
return new{T}()
end
end
function criterion_and_gradient!(∇Q::OptionalGradient, ::Biquartimin, Λ::AbstractMatrix)
Λ₂ = @view Λ[:, 2:end]
Λ₂sq = Λ₂ .^ 2
part = !isnothing(∇Q) ? @view(∇Q[:, 2:end]) : similar(Λ₂sq)
part .= sum(Λ₂sq, dims = 2) .- Λ₂sq
Q = sum(Λ₂sq .* part)
if !isnothing(∇Q)
# ∇Q[:, 2:end] === part
∇Q[:, 1] .= zero(eltype(∇Q))
@. part *= 4 * Λ₂
end
return Q
end
"""
AbstractComponentLoss{RT} <: RotationMethod{RT}
An abstract type representing component loss functions.
Implementing a custom component loss `T` requires that `T <: AbstractComponentLoss`.
"""
abstract type AbstractComponentLoss{RT} <: RotationMethod{RT} end
function criterion_and_gradient!(
::Nothing,
method::AbstractComponentLoss,
Λ::AbstractMatrix{<:Real},
)
return -sum(method.loss, Λ)
end
"""
ComponentLoss(loss::Function; orthogonal = false)
A generic implementation of the component loss factor rotation method.
`loss` defines the loss function that is applied to the components of the loading matrix.
## Keyword arguments
- `orthogonal`: If `orthogonal = true` an orthogonal rotation is performed, an oblique
rotation otherwise. (default: `false`)
## Details
The component loss factor rotation applies a loss function to each element of the factor
loading matrix. Then the following criterion is minimized:
```math
Q(\\Lambda) = \\sum_i \\sum_j h(\\lambda_{ij})
```
## Examples
### Quartimax as a component loss
```jldoctest; filter = [r"(\\d*)\\.(\\d{4})\\d+" => s"\\1.\\2", r"var\\"(.*)\\"" => s""]
$(DEFINITION_L)
julia> quartimax_loss = ComponentLoss(x -> x^4, orthogonal = true);
julia> L_component_loss = rotate(L, quartimax_loss)
FactorRotation{Float64} with loading matrix:
8×2 Matrix{Float64}:
0.898755 0.194824
0.933943 0.129749
0.902131 0.103864
0.876508 0.171284
0.315572 0.876476
0.251123 0.773489
0.198007 0.714678
0.307857 0.659334
julia> L_quartimax = rotate(L, Quartimax())
FactorRotation{Float64} with loading matrix:
8×2 Matrix{Float64}:
0.898755 0.194823
0.933943 0.129748
0.902132 0.103864
0.876508 0.171284
0.315572 0.876476
0.251124 0.773489
0.198008 0.714678
0.307858 0.659334
julia> isapprox(loadings(L_component_loss), loadings(L_quartimax), atol = 1e-5)
true
```
"""
struct ComponentLoss{RT,F} <: AbstractComponentLoss{RT}
loss::F
function ComponentLoss(loss::F; orthogonal = false) where {F}
T = orthogonal ? Orthogonal : Oblique
return new{T,F}(loss)
end
end
"""
KatzRohlf(bandwidth)
A component loss criterion with loss function
```math
h(\\lambda) = 1 - \\exp\\left(-\\left(\\frac{\\lambda}{b}\\right)^2\\right)
```
where ``b`` is the bandwidth parameter.
"""
struct KatzRohlf{F} <: AbstractComponentLoss{Orthogonal}
bandwidth::Float64
loss::F
function KatzRohlf(bandwidth)
bandwidth > 0 || throw(ArgumentError("bandwidth must be positive"))
loss(x) = 1 - exp(-(x / bandwidth)^2)
return new{typeof(loss)}(bandwidth, loss)
end
end
"""
LinearRightConstant(bandwidth)
The linear right constant component loss factor rotation criterion.
It has the loss function
```math
h(\\lambda) = \\begin{cases}
(\\frac{\\lambda}{b})^2&\\text{if } |\\lambda| \\leq b \\\\
1 &\\text{if } |\\lambda| > b
\\end{cases}
```
where ``b`` is the bandwidth parameter.
"""
struct LinearRightConstant{F} <: AbstractComponentLoss{Orthogonal}
bandwidth::Float64
loss::F
function LinearRightConstant(bandwidth)
bandwidth > 0 || throw(ArgumentError("bandwidth must be positive"))
loss(x) = abs(x) > bandwidth ? 1.0 : (x / bandwidth)^2
return new{typeof(loss)}(bandwidth, loss)
end
end
"""
Concave(bandwidth = 1)
The simple concave component loss factor rotation criterion.
It has the loss function
```math
h(\\lambda) = 1 - \\exp(-\\frac{|\\lambda|}{b})
```
where ``b`` is the bandwidth parameter.
"""
struct Concave{F} <: AbstractComponentLoss{Oblique}
bandwidth::Float64
loss::F
function Concave(bandwidth = 1)
bandwidth > 0 || throw(ArgumentError("bandwidth must be positive"))
loss(x) = 1 - exp(-abs(x) / bandwidth)
return new{typeof(loss)}(bandwidth, loss)
end
end
"""
Absolmin(epsilon)
The Absolmin component loss factor rotation criterion.
It has the loss function
```math
h(\\lambda) = |\\lambda|
```
where for ``|\\lambda| \\leq \\epsilon`` the absolute value is replaced by a quadratic
approximation to keep the loss differentiable.
"""
struct Absolmin{F} <: AbstractComponentLoss{Oblique}
epsilon::Float64
loss::F
function Absolmin(epsilon)
epsilon > 0 || throw(ArgumentError("epsilon must be positive"))
b = 1 / (2 * epsilon)
a = epsilon - b * epsilon^2
loss(x) = abs(x) > epsilon ? abs(x) : a + b * abs2(x)
return new{typeof(loss)}(epsilon, loss)
end
end
"""
CrawfordFerguson(; kappa, orthogonal = false)
The family of Crawford-Ferguson rotation methods.
## Keyword arguments
- `kappa`: The parameter determining the rotation criterion (see *Details*).
- `orthogonal`: If `orthogonal = true` an orthogonal rotation is performed, an
oblique rotation otherwise. (default: `false`)
## Details
The Crawford-Ferguson family allows both orthogonal and oblique rotation of the
*p*×*k* factor loading matrix. If orthogonal rotation is performed, Crawford-Ferguson with
a specific value for `kappa` is equivalent to the following rotation methods:
- *κ = γ/p*: [`Oblimin(gamma = γ, orthogonal = true)`](@ref Oblimin)
- *κ = 0*: [`Quartimax`](@ref)
- *κ = 1/p*: [`Varimax`](@ref)
- *κ = k/(2p)*: [`Equamax`](@ref)
- *κ = (k - 1)/(p + k - 2)*: [`Parsimax`](@ref)
- *κ = 1*: Factor parsimony
## Examples
```jldoctest
julia> CrawfordFerguson(kappa = 0, orthogonal = true)
CrawfordFerguson{Orthogonal, Int64}(0)
julia> CrawfordFerguson(kappa = 1/2, orthogonal = false)
CrawfordFerguson{Oblique, Float64}(0.5)
```
"""
struct CrawfordFerguson{T,V} <: RotationMethod{T}
κ::V
function CrawfordFerguson(; kappa, orthogonal = false)
0 <= kappa <= 1 || throw(ArgumentError("kappa must be between 0 and 1"))
T = orthogonal ? Orthogonal : Oblique
return new{T,typeof(kappa)}(kappa)
end
end
criterion_and_gradient!(∇Q::OptionalGradient, method::CrawfordFerguson, Λ::AbstractMatrix{<:Real}) =
weighted_sums_criterion_and_gradient!(∇Q, Λ, 1 - method.κ, method.κ)
"""
Equamax()
*Equamax* is an orthogonal rotation method, equivalent to [`Oblimin`](@ref)
rotation of a *p × k* loading matrix *Λ* with ``\\gamma = \\frac{k}{2}``.
"""
struct Equamax <: RotationMethod{Orthogonal} end
function criterion_and_gradient!(∇Q::OptionalGradient, ::Equamax, Λ::AbstractMatrix{<:Real})
p, k = size(Λ)
weighted_sums_criterion_and_gradient!(∇Q, Λ, 1 - k / (2p), k / (2p))
end
"""
Geomin(epsilon = 0.01)
The Geomin rotation method.
## Keyword arguments
- `epsilon`: A small constant to deal with zero loadings.
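## Details
The criterion minimized is the sum of the row-wise geometric means of the squared
loadings (a restatement of the implementation below; `epsilon` guards against zero
loadings),
```math
Q(\\Lambda) = \\sum_i \\left(\\prod_j (\\lambda_{ij}^2 + \\epsilon)\\right)^{1/k}.
```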
"""
struct Geomin{T} <: RotationMethod{Oblique}
ε::T
function Geomin(; epsilon::T = 0.01) where {T<:Real}
epsilon >= 0 || throw(ArgumentError("epsilon must be non-negative"))
return new{T}(epsilon)
end
end
function criterion_and_gradient!(∇Q::OptionalGradient, method::Geomin, Λ::AbstractMatrix{T}) where {T}
@unpack ε = method
Λsq = !isnothing(∇Q) ? ∇Q : similar(Λ)
Λsq .= Λ .^ 2 .+ ε
k = size(Λ, 2)
part = exp.(sum(log.(Λsq), dims=2) ./ k)
Q = sum(part)
if !isnothing(∇Q)
# ∇Q === Λsq
∇Q .= (2 / k) .* Λ ./ Λsq .* part
end
return Q
end
"""
Infomax(; orthogonal = false)
The Infomax rotation method.
## Keyword arguments
- `orthogonal`: If `orthogonal = true` an orthogonal rotation is performed, an oblique
rotation otherwise. (default: `false`)
"""
struct Infomax{T} <: RotationMethod{T}
function Infomax(; orthogonal = false)
T = orthogonal ? Orthogonal : Oblique
return new{T}()
end
end
function criterion_and_gradient!(∇Q::OptionalGradient, ::Infomax, Λ::AbstractMatrix{T}) where {T}
k = size(Λ, 2)
Λsq = Λ .^ 2
total = sum(Λsq)
Λsq ./= total
rowsums = sum(Λsq, dims = 2)
colsums = sum(Λsq, dims = 1)
Q = -log(k) + sum(mxlogx, Λsq) - sum(mxlogx, rowsums) -
sum(mxlogx, colsums)
isnothing(∇Q) && return Q
H = @. -(log(Λsq) + 1)
G₀ = H .- dot(Λsq, H)  # dot(...) is the scalar Frobenius inner product
h₁ = @. -(log(rowsums) + 1)
G₁ = h₁ .- dot(rowsums, h₁)
h₂ = @. -(log(colsums) + 1)
G₂ = h₂ .- dot(h₂, colsums)
@. ∇Q = (2/total) * Λ * (G₀ - G₁ - G₂)
return Q
end
"""
RotationMethod{T<:RotationType}
An abstract type representing a factor rotation method.
Each implementation of `M <: RotationMethod` must provide a
[`criterion_and_gradient!`](@ref) method.
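For example, a custom criterion equivalent to [`Quartimax`](@ref) could be sketched as
(hypothetical `MyQuartimax`, mirroring the built-in implementation):

```julia
struct MyQuartimax <: RotationMethod{Orthogonal} end

function criterion_and_gradient!(∇Q, ::MyQuartimax, Λ::AbstractMatrix)
    isnothing(∇Q) || (@. ∇Q = -Λ^3)  # gradient of Q; skipped for criterion-only calls
    return -sum(x -> x^4, Λ) / 4     # criterion value Q
end
```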
"""
abstract type RotationMethod{T<:RotationType} end
"""
criterion(method::RotationMethod, Λ::AbstractMatrix{<:Real})
Calculate the criterion of a given `method` with respect to the factor loading matrix `Λ`.
The method is just a wrapper for a [`criterion_and_gradient!(nothing, method, Λ)`](@ref criterion_and_gradient!) call.
"""
criterion(method::RotationMethod, Λ::AbstractMatrix{<:Real}) = criterion_and_gradient!(nothing, method, Λ)
"""
criterion_and_gradient!(∇Q::Union{AbstractMatrix{<:Real}, Nothing},
method::RotationMethod, Λ::AbstractMatrix{<:Real})
Calculate the quality criterion *Q* and its gradient for a given `method`
with respect to the factor loading matrix `Λ`.
The gradient is output into `∇Q` matrix, which should have the same dimensions as `Λ`.
The `∇Q` calculation is skipped if `∇Q ≡ nothing`.
Returns the *Q* criterion value.
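A minimal usage sketch (assuming a loading matrix `Λ`):

```julia
∇Q = similar(Λ)
Q = criterion_and_gradient!(∇Q, Varimax(), Λ)  # ∇Q now holds the gradient
Q == criterion_and_gradient!(nothing, Varimax(), Λ)  # criterion-only call, same value
```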
"""
criterion_and_gradient!
const OptionalGradient = Union{AbstractMatrix, Nothing}
# fallback method when autodiff backend is not available
function autodiff_gradient!(_::AutodiffBackend{B}, ∇Q::AbstractMatrix, method::RotationMethod, Λ::AbstractMatrix) where B
if B == :Enzyme
error("Enzyme.jl autodiff backend is not loaded. Have you run \"using Enzyme\"?")
else
error("$(B) autodiff backend is not supported.")
end
end
autodiff_gradient!(∇Q::AbstractMatrix, method::RotationMethod, Λ::AbstractMatrix) =
autodiff_gradient!(AUTODIFF_BACKEND[], ∇Q, method, Λ)
# fallback method that applies auto-diff to criterion() call
function criterion_and_gradient!(∇Q::OptionalGradient, method::RotationMethod, Λ::AbstractMatrix)
if !isnothing(∇Q)
autodiff_gradient!(∇Q, method, Λ)
else
error("$(typeof(method)) does not implement neither criterion_and_gradient!(∇Q, ...) nor criterion_and_gradient!(nothing, ...) methods.")
end
return criterion(method, Λ)
end
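# A hypothetical usage sketch: methods without a hand-written gradient (e.g.
# `ComponentLoss`) rely on this autodiff fallback, which currently requires
# loading Enzyme.jl and selecting it as the backend:
#
#   using Enzyme
#   FactorRotations.set_autodiff_backend(:Enzyme)
#   criterion_and_gradient!(similar(Λ), ComponentLoss(abs2), Λ)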
"""
isorthogonal(::RotationMethod)
Checks if the supplied rotation method is orthogonal.
## Examples
```jldoctest
julia> isorthogonal(Varimax())
true
julia> isorthogonal(Oblimax(orthogonal = false))
false
```
"""
isorthogonal(method::RotationMethod) = method isa RotationMethod{Orthogonal}
"""
isoblique(::RotationMethod)
Checks if the supplied rotation method is oblique.
## Examples
```jldoctest
julia> isoblique(Varimax())
false
julia> isoblique(Oblimax(orthogonal = false))
true
```
"""
isoblique(method::RotationMethod) = method isa RotationMethod{Oblique}
"""
rotation_type(::RotationMethod)
Return the rotation type for a given rotation method.
## Examples
```jldoctest
julia> rotation_type(Varimax())
Orthogonal
julia> rotation_type(Oblimin(gamma = 0.5))
Oblique
```
"""
rotation_type(::RotationMethod{RT}) where {RT} = RT
"""
weighted_sums_criterion_and_gradient!(
[∇Q::AbstractMatrix],
Λ::AbstractMatrix{<:Real},
columns_weight::Number, rows_weight::Number
)
Calculate the value and the gradient of the criterion, which
is based on the weighted column- and row-wise sums of *Λ²*.
Specifically,
```math
Q(Λ) = \\frac{c}{4} ∑ᵢⁿ \\left(∑ⱼᵐ Λ²_{i,j}\\right)² +
\\frac{r}{4} ∑ⱼᵐ \\left(∑ᵢⁿ Λ²_{i,j}\\right)² -
\\frac{1}{4} ∑ᵢⁿ∑ⱼᵐ Λ⁴,
```
where *c* is `columns_weight` (weighting the row-wise sums taken across the *m* columns)
and *r* is `rows_weight` (weighting the column-wise sums taken down the *n* rows).
The gradient is output into `∇Q` matrix, which should have the same dimensions as `Λ`.
The `∇Q` calculation is skipped if `∇Q ≡ nothing`.
This function is used by multiple rotation methods, such as [`CrawfordFerguson`](@ref),
[`Equamax`](@ref), [`Oblimin`](@ref), and [`Parsimax`](@ref).
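For example, [`Varimax`](@ref) (≡ [`Oblimin`](@ref) with *γ = 1*) corresponds to
*c = 0* and *r = 1/p* (a sketch, not a doctest; `L` is a loading matrix):

```julia
p = size(L, 1)
criterion(Varimax(), L) ≈ weighted_sums_criterion_and_gradient!(nothing, L, 0, 1 / p)  # true
```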
"""
function weighted_sums_criterion_and_gradient!(
∇Q::Union{Nothing, AbstractMatrix},
Λ::AbstractMatrix{<:Real},
columns_weight::Number, rows_weight::Number
)
Λsq = !isnothing(∇Q) ? ∇Q : similar(Λ)
Λsq .= Λ .^ 2
Λsq_rowsum = sum(Λsq, dims=1)
Λsq_colsum = sum(Λsq, dims=2)
Q = (columns_weight * sum(abs2, Λsq_colsum) + rows_weight * sum(abs2, Λsq_rowsum) - sum(abs2, Λsq)) / 4
if !isnothing(∇Q)
# ∇Q === Λsq
# weighted Λ² columns and rows sum at each position - Λ²
∇Q .= (columns_weight .* Λsq_colsum) .+
(rows_weight .* Λsq_rowsum) .- Λsq
∇Q .*= Λ
end
return Q
end
include("biquartimax.jl")
include("biquartimin.jl")
include("component_loss.jl")
include("crawford_ferguson.jl")
include("equamax.jl")
include("geomin.jl")
include("infomax.jl")
include("minimum_entropy.jl")
include("minimum_entropy_ratio.jl")
include("oblimax.jl")
include("target_rotation.jl")
include("oblimin.jl")
include("parsimax.jl")
include("pattern_simplicity.jl")
include("quartimax.jl")
include("simplimax.jl")
include("tandem_criteria.jl")
include("varimax.jl")
"""
MinimumEntropy()
The Minimum Entropy rotation method.
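## Details
The criterion minimized is (a restatement of the implementation below)
```math
Q(\\Lambda) = -\\frac{1}{2} \\sum_{i,j} \\lambda_{ij}^2 \\log \\lambda_{ij}^2.
```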
"""
struct MinimumEntropy <: RotationMethod{Orthogonal} end
function criterion_and_gradient!(∇Q::OptionalGradient, ::MinimumEntropy, Λ::AbstractMatrix)
Λsq = Λ .^ 2
mlogΛsq = !isnothing(∇Q) ? ∇Q : similar(Λ)
@. mlogΛsq = -log(Λsq)
Q = dot(Λsq, mlogΛsq) / 2
if !isnothing(∇Q)
# ∇Q === mlogΛsq
∇Q .-= one(eltype(∇Q))
∇Q .*= Λ
end
return Q
end
"""
MinimumEntropyRatio()
The Minimum Entropy Ratio rotation method.
"""
struct MinimumEntropyRatio <: RotationMethod{Orthogonal} end
function criterion_and_gradient!(∇Q::OptionalGradient, ::MinimumEntropyRatio, Λ::AbstractMatrix{T}) where {T}
p, k = size(Λ)
Λsq = Λ .^ 2
total = sum(Λsq)
colsums = sum(Λsq, dims = 1)
p₂ = colsums / total
Q₁ = sum(mxlogx, Λsq ./ colsums)
Q₂ = sum(mxlogx, p₂)
Q = log(Q₁) - log(Q₂)
isnothing(∇Q) && return Q
u = Ones(T, p)
v = Ones(T, k)
M = u * u'
R = M * Λsq
P = Λsq ./ R
H = @. -(log(P) + 1)
G₁ = H ./ R - M * (Λsq .* H ./ (R .^ 2))
h = @. -(log(p₂) + 1)
α = h * p₂'
G₂ = u * h / total - α .* u * v'
@. ∇Q = 2Λ * (G₁ / Q₁ - G₂ / Q₂)
return Q
end
"""
Oblimax(; orthogonal = false)
The Oblimax rotation method.
## Keyword arguments
- `orthogonal`: If `orthogonal = true` an orthogonal rotation is performed, an oblique
rotation otherwise. (default: `false`)
## Details
The Oblimax rotation method is equivalent to [`Quartimax`](@ref) for orthogonal rotation.
## Examples
```jldoctest; filter = r"(\\d*)\\.(\\d{4})\\d+" => s"\\1.\\2"
$(DEFINITION_L)
julia> L_oblimax = rotate(L, Oblimax(orthogonal = true));
julia> L_quartimax = rotate(L, Quartimax());
julia> isapprox(loadings(L_oblimax), loadings(L_quartimax), atol = 1e-6)
true
```
"""
struct Oblimax{T} <: RotationMethod{T}
function Oblimax(; orthogonal = false)
T = orthogonal ? Orthogonal : Oblique
return new{T}()
end
end
function criterion_and_gradient!(∇Q::OptionalGradient, ::Oblimax, Λ::AbstractMatrix)
sqnorm_Λsq = sum(x -> x^4, Λ)
sqnorm_Λ = norm(Λ)^2
K = sqnorm_Λsq / sqnorm_Λ^2
Q = -log(K)
if !isnothing(∇Q)
∇Q .= Λ .^ 3
axpby!(4/sqnorm_Λ, Λ, -4/sqnorm_Λsq, ∇Q)
end
return Q
end
"""
Oblimin(; gamma, orthogonal = false)
The family of *Oblimin* rotation methods.
## Keyword arguments
- `gamma`: The shape parameter determining the rotation criterion (see *Details*).
- `orthogonal`: If `orthogonal = true` an orthogonal rotation is performed, an oblique
rotation otherwise. (default: `false`)
## Details
The *Oblimin* rotation family allows orthogonal as well as oblique rotation of the *p*×*k* factor
loading matrix. If orthogonal rotation is performed, *Oblimin* is equivalent to the following
rotation methods given a value for `gamma`:
- *γ = p×κ*: [`CrawfordFerguson(kappa = κ, orthogonal = true)`](@ref CrawfordFerguson)
- *γ = 0*: [`Quartimax`](@ref)
- *γ = 1/2*: [`Biquartimax`](@ref)
- *γ = 1*: [`Varimax`](@ref)
- *γ = k/2*: [`Equamax`](@ref)
- *γ = p×(k - 1)/(p + k - 2)*: [`Parsimax`](@ref)
For oblique rotation *Oblimin* is equivalent to the following rotation methods:
- *γ = 0*: *Quartimin*
- *γ = 1/2*: [`Biquartimin`](@ref)
## Examples
```jldoctest
julia> Oblimin(gamma = 0.5)
Oblimin{Oblique, Float64}(0.5)
julia> Oblimin(gamma = 1, orthogonal = true)
Oblimin{Orthogonal, Int64}(1)
```
"""
struct Oblimin{T,V} <: RotationMethod{T}
γ::V
function Oblimin(; gamma, orthogonal = false)
T = orthogonal ? Orthogonal : Oblique
return new{T,typeof(gamma)}(gamma)
end
end
criterion_and_gradient!(∇Q::OptionalGradient, method::Oblimin, Λ::AbstractMatrix{<:Real}) =
weighted_sums_criterion_and_gradient!(∇Q, Λ, 1 - method.γ, method.γ / size(Λ, 1))
"""
Parsimax()
*Parsimax* is an orthogonal rotation method, equivalent to [`CrawfordFerguson`](@ref)
rotation of a *p × k* loading matrix *Λ* with ``\\kappa = \\frac{k - 1}{p + k - 2}``.
"""
struct Parsimax <: RotationMethod{Orthogonal} end
function criterion_and_gradient!(∇Q::OptionalGradient, ::Parsimax, Λ::AbstractMatrix{<:Real})
p, k = size(Λ)
weighted_sums_criterion_and_gradient!(∇Q, Λ, (p - 1) / (p + k - 2), (k - 1) / (p + k - 2))
end
"""
PatternSimplicity(; orthogonal = false)
The Pattern Simplicity factor rotation criterion.
## Keyword arguments
- `orthogonal`: If `orthogonal = true` an orthogonal rotation is performed, an oblique
rotation otherwise. (default: `false`)
"""
struct PatternSimplicity{RT} <: RotationMethod{RT}
function PatternSimplicity(; orthogonal = false)
T = orthogonal ? Orthogonal : Oblique
return new{T}()
end
end
function criterion_and_gradient!(∇Q::OptionalGradient, method::PatternSimplicity, Λ::AbstractMatrix)
Λsq = Λ .^ 2
m = Λsq' * Λsq
diag_m = diagm(diag(m))
if !isnothing(∇Q)
∇Q .= Λ .* (Λsq * (inv(m) - inv(diag_m)))
lmul!(-4, ∇Q)
end
return logdet(diag_m) - logdet(m)
end
"""
Quartimax()
The Quartimax rotation criterion.
## Details
The Quartimax criterion is a special case of the [`Oblimin`](@ref) rotation criterion with
parameter `gamma = 0`.
## Examples
### Setting up the criterion
```jldoctest
julia> Quartimax()
Quartimax()
```
### Testing equivalence of Quartimax and Oblimin
```jldoctest; filter = r"(\\d*)\\.(\\d{4})\\d+" => s"\\1.\\2"
$(DEFINITION_L)
julia> L_quartimax = rotate(L, Quartimax());
julia> L_oblimin = rotate(L, Oblimin(gamma = 0, orthogonal = true));
julia> loadings(L_quartimax) ≈ loadings(L_oblimin)
true
```
"""
struct Quartimax <: RotationMethod{Orthogonal} end
function criterion_and_gradient!(∇Q::OptionalGradient, method::Quartimax, Λ::AbstractMatrix)
if !isnothing(∇Q)
@. ∇Q = -Λ^3
end
return -sum(x -> x^4, Λ) / 4
end
"""
Simplimax(; m::Int)
The Simplimax rotation method.
"""
struct Simplimax <: RotationMethod{Oblique}
m::Int
function Simplimax(; m::Int)
m < 1 && throw(ArgumentError("m must be greater than or equal to 1"))
return new(m)
end
end
function criterion_and_gradient!(∇Q::OptionalGradient, method::Simplimax, Λ::AbstractMatrix)
Λsq = Λ .^ 2
λm = nthsmallest(Λsq, method.m)
Λind = Λsq .<= λm
Q = dot(Λsq, Λind) / 2
if !isnothing(∇Q)
∇Q .= Λ .* Λind
end
return Q
end
"""
TandemCriterionI()
The first criterion of the tandem criteria factor rotation method.
"""
struct TandemCriterionI <: RotationMethod{Orthogonal} end
function criterion_and_gradient!(∇Q::OptionalGradient, method::TandemCriterionI, Λ::AbstractMatrix)
Λsq = Λ .^ 2
ΛxΛ = Λ * Λ'
part = !isnothing(∇Q) ? ∇Q : similar(Λsq)
mul!(part, ΛxΛ.^2, Λsq)
Q = -dot(Λsq, part)
if !isnothing(∇Q)
∇Q .*= Λ # ∇Q === part
mul!(∇Q, ΛxΛ .* (Λsq * Λsq'), Λ, -4, -4)
end
return Q
end
"""
TandemCriterionII()
The second criterion of the tandem criteria factor rotation method.
"""
struct TandemCriterionII <: RotationMethod{Orthogonal} end
function criterion_and_gradient!(∇Q::OptionalGradient, method::TandemCriterionII, Λ::AbstractMatrix)
Λsq = Λ .^ 2
ΛxΛ = Λ * Λ'
part = !isnothing(∇Q) ? ∇Q : similar(Λsq)
mul!(part, (1 .- ΛxΛ.^2), Λsq)
Q = dot(Λsq, part)
if !isnothing(∇Q)
∇Q .*= Λ # ∇Q === part
mul!(∇Q, ΛxΛ .* (Λsq * Λsq'), Λ, -4, 4)
end
return Q
end
"""
TandemCriteria(; keep)
The tandem criteria rotation method.
## Keyword arguments
- `keep`: The number of factors to keep for the second tandem criterion.
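## Examples
A sketch (not a doctest; `Λ` is a loading matrix): the loading matrix is rotated by
[`TandemCriterionI`](@ref), the first `keep` factors are retained, and the reduced
matrix is rotated by [`TandemCriterionII`](@ref).

```julia
rot = rotate(Λ, TandemCriteria(keep = 2))
```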
"""
struct TandemCriteria <: RotationMethod{Orthogonal}
keep::Int
function TandemCriteria(; keep)
keep > 1 || throw(ArgumentError("must keep more than 1 factor for rotation"))
return new(keep)
end
end
"""
TargetRotation(target::AbstractMatrix; orthogonal = false)
The (partial) target rotation criterion.
## Keyword arguments
- `orthogonal`: If `orthogonal = true` an orthogonal rotation is performed, an oblique
rotation otherwise. (default: `false`)
## Details
Target rotation rotates a factor loading matrix towards the target matrix, `target`.
For a fully specified `target` matrix (e.g. all entries in the matrix are numbers), full
target rotation is performed.
Partially specified target rotation can be achieved by setting the unspecified entries in
the `target` matrix to `missing`.
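The criterion minimized is (a restatement of the implementation; ``W`` contains 1 for
specified and 0 for unspecified entries, and is dropped when the target is fully
specified)
```math
Q(\\Lambda) = \\frac{1}{2}\\lVert W \\circ (\\Lambda - T)\\rVert^2,
```
where ``T`` is the target matrix and ``\\circ`` denotes the element-wise product.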
## Examples
### Full target rotation
```jldoctest; filter = r"(\\d*)\\.(\\d{4})\\d+" => s"\\1.\\2"
$(DEFINITION_L)
julia> target = [1 0; 1 0; 1 0; 1 0; 0 1; 0 1; 0 1; 0 1];
julia> rotate(L, TargetRotation(target, orthogonal = true))
FactorRotation{Float64} with loading matrix:
8×2 Matrix{Float64}:
0.882633 0.258215
0.922358 0.195806
0.892467 0.167726
0.862116 0.233154
0.252473 0.89669
0.195508 0.789382
0.146707 0.726945
0.260213 0.679549
```
### Partially specified target rotation
```jldoctest; filter = r"(\\d*)\\.(\\d{4})\\d+" => s"\\1.\\2"
$(DEFINITION_L)
julia> target = [1 0; missing missing; 1 0; 1 0; 0 1; 0 1; 0 1; 0 1];
julia> rotate(L, TargetRotation(target, orthogonal = true))
FactorRotation{Float64} with loading matrix:
8×2 Matrix{Float64}:
0.873299 0.288209
0.915133 0.227193
0.886218 0.198109
0.85365 0.262462
0.221701 0.90479
0.168434 0.795599
0.121793 0.731532
0.236852 0.68804
```
"""
struct TargetRotation{T,V<:AbstractMatrix,W<:Union{AbstractMatrix, Nothing}} <: RotationMethod{T}
target::V
weights::W
function TargetRotation(target; orthogonal = false)
T = orthogonal ? Orthogonal : Oblique
# construct weight matrix assuming 'missing' are unspecified values
if any(ismissing, target)
weights = @. !ismissing(target)
target = coalesce.(target, zero(nonmissingtype(eltype(target))))
else # no missing values, weights are not required
weights = nothing
end
return new{T,typeof(target),typeof(weights)}(target, weights)
end
end
function criterion_and_gradient!(∇Q::OptionalGradient, method::TargetRotation, Λ::AbstractMatrix)
@unpack target, weights = method
size(target) == size(Λ) ||
throw(ArgumentError("target matrix and loading matrix must be of equal size"))
dQ = isnothing(∇Q) ? similar(Λ) : ∇Q
@. dQ = Λ - target
if !isnothing(weights)
@. dQ *= weights
end
Q = norm(dQ)^2 / 2
return Q
end
"""
Varimax()
The *Varimax* rotation criterion.
## Details
The *Varimax* is an orthogonal rotation method that maximizes the column variances of the loading matrix.
It is a special case of the [`Oblimin`](@ref) rotation criterion with parameter
`gamma = 1`.
## Examples
### Setting up the criterion
```jldoctest
julia> Varimax()
Varimax()
```
### Testing equivalence of Varimax and Oblimin
```jldoctest; filter = r"(\\d*)\\.(\\d{4})\\d+" => s"\\1.\\2"
$(DEFINITION_L)
julia> L_varimax = rotate(L, Varimax());
julia> L_oblimin = rotate(L, Oblimin(gamma = 1, orthogonal = true));
julia> loadings(L_varimax) ≈ loadings(L_oblimin)
true
```
"""
struct Varimax <: RotationMethod{Orthogonal} end
function criterion_and_gradient!(∇Q::OptionalGradient, ::Varimax, Λ::AbstractMatrix)
Λsq = isnothing(∇Q) ? similar(Λ) : ∇Q
Λsq .= Λ .^ 2
centercols!(Λsq)
Q = -norm(Λsq)^2 / 4
if !isnothing(∇Q)
@. ∇Q *= -Λ # ∇Q is already centered Λsq
end
return Q
end
function test_criterion_and_gradient(method, Λ)
∇Q = fill!(similar(Λ), NaN)
Q = @inferred(criterion_and_gradient!(∇Q, method, Λ))
@test Q isa Real
@test all(isfinite, ∇Q)
# test criterion-only calculation
Q2 = @inferred(criterion_and_gradient!(nothing, method, Λ))
@test Q2 == Q
# test criterion() wrapper
Q3 = @inferred(criterion(method, Λ))
@test Q3 isa Real
@test Q3 == Q
return nothing
end
function test_rotate(method, Λ; init)
rot = @inferred(rotate(Λ, method; init))
p, k = size(Λ)
@test size(@inferred(loadings(rot))) == (p, k)
@test size(@inferred(rotation(rot))) == (k, k)
@test size(@inferred(factor_correlation(rot))) == (k, k)
@test loadings(rot) * rotation(rot)' ≈ Λ
if isorthogonal(method)
@test factor_correlation(rot) ≈ I
end
end
function test_equivalence(Λ, m1::RotationMethod, m2::RotationMethod; kwargs...)
r1 = rotate(Λ, m1; kwargs...)
r2 = rotate(Λ, m2; kwargs...)
@test loadings(r1) ≈ loadings(r2) atol = 1e-5
@test rotation(r1) ≈ rotation(r2) atol = 1e-5
@test factor_correlation(r1) ≈ factor_correlation(r2) atol = 1e-5
end
@testset "factor rotation autodiff fallback" begin
method = ComponentLoss(abs2, orthogonal = true)
∇Q = fill!(similar(A), NaN)
FactorRotations.set_autodiff_backend(:ABC)
@test_throws "ABC autodiff backend is not supported" criterion_and_gradient!(∇Q, method, A)
FactorRotations.set_autodiff_backend(:Enzyme)
@test_throws "Enzyme.jl autodiff backend is not loaded" criterion_and_gradient!(∇Q, method, A)
end
@testset "factor rotation methods" begin
@testset "utility functions" begin
@test isorthogonal(Varimax()) != isoblique(Varimax())
@test isorthogonal(Oblimax(orthogonal = false)) !=
isoblique(Oblimax(orthogonal = false))
end
@testset "Biquartimax" begin
method = Biquartimax()
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
# Biquartimax is a special case of Oblimin
test_equivalence(A, Biquartimax(), Oblimin(gamma = 0.5, orthogonal = true); init)
end
@testset "Biquartimin" begin
# Example 3.1 in Jennrich & Bentler (2011)
bifactorA = [
1.17 0.78 0.18
2.08 0.78 -0.22
1.17 0.78 0.18
2.15 -0.62 -0.08
1.23 -0.62 0.32
2.15 -0.62 -0.08
]
# oblique case
method = Biquartimin()
@test isoblique(method)
test_criterion_and_gradient(method, bifactorA)
# orthogonal case
method = Biquartimin(orthogonal = true)
@test isorthogonal(method)
test_criterion_and_gradient(method, bifactorA)
end
@testset "ComponentLoss" begin
using Enzyme # load Enzyme to enable autodiff
# orthogonal case
method = ComponentLoss(abs2, orthogonal = true)
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
method = KatzRohlf(0.3)
@test isorthogonal(method)
@test_throws ArgumentError KatzRohlf(-1.0)
@test_throws ArgumentError KatzRohlf(0)
test_criterion_and_gradient(method, A)
method = LinearRightConstant(0.3)
@test isorthogonal(method)
@test_throws ArgumentError LinearRightConstant(-1.0)
@test_throws ArgumentError LinearRightConstant(0)
test_criterion_and_gradient(method, A)
# component loss identical to quartimax
test_equivalence(A, ComponentLoss(x -> x^4, orthogonal = true), Quartimax(); init)
# oblique case
method = ComponentLoss(abs2, orthogonal = false)
@test isoblique(method)
test_criterion_and_gradient(method, A)
method = Concave(1)
@test isoblique(method)
test_criterion_and_gradient(method, A)
@test_throws ArgumentError Concave(0)
@test_throws ArgumentError Concave(-2)
method = Absolmin(1e-5)
@test isoblique(method)
test_criterion_and_gradient(method, A)
@test_throws ArgumentError Absolmin(-1.0)
@test_throws ArgumentError Absolmin(0)
end
@testset "CrawfordFerguson" begin
@test_throws ArgumentError CrawfordFerguson(kappa = 2.0)
@test_throws ArgumentError CrawfordFerguson(kappa = -0.2)
# orthogonal case
method = CrawfordFerguson(kappa = 0.2, orthogonal = true)
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
# for the orthogonal case, Crawford-Ferguson and Oblimin are equivalent
p, k = size(A)
test_equivalence(
A,
CrawfordFerguson(kappa = 0, orthogonal = true),
Quartimax();
init,
)
test_equivalence(
A,
CrawfordFerguson(kappa = 0, orthogonal = true),
Oblimin(gamma = 0, orthogonal = true);
init,
)
test_equivalence(
A,
CrawfordFerguson(kappa = 1 / p, orthogonal = true),
Varimax();
init,
)
test_equivalence(
A,
CrawfordFerguson(kappa = 1 / p, orthogonal = true),
Oblimin(gamma = 1, orthogonal = true);
init,
)
test_equivalence(
A,
Equamax(),
CrawfordFerguson(kappa = k / (2p), orthogonal = true);
init,
)
test_equivalence(
A,
Parsimax(),
CrawfordFerguson(kappa = (k - 1) / (p + k - 2), orthogonal = true);
init,
)
# TODO: Factor Parsimony: kappa = 1
# oblique case
method = CrawfordFerguson(kappa = 0.5, orthogonal = false)
@test isoblique(method)
test_criterion_and_gradient(method, A)
end
@testset "Equamax" begin
method = Equamax()
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
end
@testset "Geomin" begin
@test_throws ArgumentError Geomin(epsilon = -1)
method = Geomin()
@test isoblique(method)
test_criterion_and_gradient(method, A)
end
@testset "Infomax" begin
# orthogonal case
method = Infomax(orthogonal = true)
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
# oblique case
method = Infomax(orthogonal = false)
@test isoblique(method)
test_criterion_and_gradient(method, A)
end
@testset "MinimumEntropy" begin
method = MinimumEntropy()
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
end
@testset "MinimumEntropyRatio" begin
method = MinimumEntropyRatio()
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
end
@testset "Oblimax" begin
# orthogonal case
method = Oblimax(orthogonal = true)
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
# Oblimax is equivalent to Quartimax in the orthogonal case
test_equivalence(A, method, Quartimax(); init)
# oblique case
method = Oblimax(orthogonal = false)
@test isoblique(method)
test_criterion_and_gradient(method, A)
end
@testset "Oblimin" begin
p, k = size(A)
# orthogonal case
method = Oblimin(gamma = 0.5, orthogonal = true)
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
# oblique case
method = Oblimin(gamma = 0.0, orthogonal = false)
@test isoblique(method)
test_criterion_and_gradient(method, A)
test_equivalence(A, Equamax(), Oblimin(gamma = k / 2, orthogonal = true); init)
test_equivalence(
A,
Parsimax(),
Oblimin(gamma = p * (k - 1) / (p + k - 2), orthogonal = true);
init,
)
end
@testset "Parsimax" begin
method = Parsimax()
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
end
@testset "PatternSimplicity" begin
# orthogonal case
method = PatternSimplicity(orthogonal = true)
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
# oblique case
method = PatternSimplicity(orthogonal = false)
@test isoblique(method)
test_criterion_and_gradient(method, A)
end
@testset "TandemCriteria" begin
method = TandemCriterionI()
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
method = TandemCriterionII()
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
@test_throws ArgumentError TandemCriteria(keep = 0)
@test_throws ArgumentError TandemCriteria(keep = -1)
@test_throws ArgumentError TandemCriteria(keep = 1)
method = TandemCriteria(keep = 2)
@test isorthogonal(method)
rot = rotate(A, method)
@test size(loadings(rot)) == (8, 2)
end
@testset "TargetRotation" begin
@test_throws ArgumentError criterion_and_gradient!(
similar(A),
TargetRotation([0 1; 1 0]),
A,
)
# orthogonal + complete case
method = TargetRotation(similar(A), orthogonal = true)
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
# orthogonal + partial specification
target = [missing 1; 0 1; 0 1; 0 1; 1 0; 1 0; 1 0; 1 0]
method = TargetRotation(target, orthogonal = true)
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
# oblique + complete case
method = TargetRotation(similar(A), orthogonal = false)
@test isoblique(method)
test_criterion_and_gradient(method, A)
# oblique + partial specification
method = TargetRotation(target, orthogonal = false)
@test isoblique(method)
test_criterion_and_gradient(method, A)
end
@testset "Quartimax" begin
method = Quartimax()
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
# Quartimax is a special case of Oblimin
test_equivalence(A, Quartimax(), Oblimin(gamma = 0, orthogonal = true); init)
# test that rotation result is identical to GPArotation
Ar = rotate(A, Quartimax(); init, g_atol = 1e-7)
# loadings published in Bernaards & Jennrich (2005)
# within the reported accuracy of 7 digits
#pub = [
# 0.8987554 0.1948197
# 0.9339440 0.1297446
# 0.9021319 0.1038604
# 0.8765090 0.1712805
# 0.3155758 0.8764747
# 0.2511265 0.7734879
# 0.1980102 0.7146775
# 0.3078601 0.6593331
#]
# loadings obtained with GPArotation v2024.3
# quartimax(A, eps=1e-8, maxit=50000, randomStarts=10)
gpa = [
0.8987545678868889 0.19482357840480186
0.9339434064715286 0.1297486551312077
0.9021314838553653 0.10386426641014213
0.8765082522883497 0.1712842189765975
0.31557202157519415 0.87647606881132
0.2511231928032839 0.7734889411208703
0.19800711751346906 0.7146783762042948
0.3078572424280441 0.6593344510069232
]
@test FactorRotations.loadings(Ar) ≈ gpa atol = 1e-6
@test criterion(Quartimax(), Ar.L) ≈ criterion(Quartimax(), gpa) atol = 1e-8
end
@testset "Simplimax" begin
@test_throws ArgumentError Simplimax(m = 0)
@test_throws ArgumentError Simplimax(m = -10)
method = Simplimax(m = 8)
@test isoblique(method)
test_criterion_and_gradient(method, A)
end
@testset "Varimax" begin
method = Varimax()
@test isorthogonal(method)
test_criterion_and_gradient(method, A)
# Varimax is a special case of Oblimin
test_equivalence(A, Varimax(), Oblimin(gamma = 1, orthogonal = true); init)
# Varimax is a special case of Crawford-Ferguson
p = size(A, 1)
test_equivalence(
A,
Varimax(),
CrawfordFerguson(kappa = 1 / p, orthogonal = true);
init,
)
end
@testset "Missing criterion implementation" begin
struct NoCriterion <: RotationMethod{Orthogonal} end
@test_throws "NoCriterion does not implement" criterion(NoCriterion(), randn(6, 6))
# Enzyme.jl would refuse to autodiff because it detects that fallback criterion() throws an error
@test_throws "NoCriterion does not implement" criterion_and_gradient!(
randn(6, 5),
NoCriterion(),
randn(6, 5),
)
end
end
@testset "Kaiser normalization" begin
m = Matrix{Float64}(I(3))
@test kaiser_normalize(m)[1] == I(3)
@test kaiser_normalize(m)[2] == ones(3)
@test kaiser_denormalize(m, fill(0.5, 3)) == 0.5 * m
m = copy(A)
_, w = kaiser_normalize!(m)
@test all(w .<= 1)
@test all(norm.(eachrow(m)) .≈ 1)
@test kaiser_denormalize!(m, w) ≈ A
end