licenses: ["Apache-2.0"] | version: 0.1.17 | tree_hash: 6936b20a42124581c88a5b0d9cc8b5c234eec682 | type: code | package_name: Geant4 | repo: https://github.com/JuliaHEP/Geant4.jl.git

#---G4Box--------------------------------------------------------------------------------------
function GeometryBasics.coordinates(box::G4Box, facets=6)
x, y, z = GetXHalfLength(box), GetYHalfLength(box), GetZHalfLength(box)
return Point3{Float64}[(-x,-y,-z), (-x,-y, z), (-x, y, z), (-x, y,-z),
(-x,-y,-z), ( x,-y,-z), ( x,-y, z), (-x,-y, z),
(-x,-y,-z), (-x, y,-z), ( x, y,-z), ( x,-y,-z),
( x, y, z), (-x, y, z), (-x,-y, z), ( x,-y, z),
( x, y, z), ( x,-y, z), ( x,-y,-z), ( x, y,-z),
( x, y, z), ( x, y,-z), (-x, y,-z), (-x, y, z)]
end
function GeometryBasics.faces(::G4Box, facets=6)
return QuadFace{Int}[(1,2,3,4), (5,6,7,8), (9,10,11,12), (13,14,15,16), (17,18,19,20), (21,22,23,24)]
end
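#---Example (illustrative, not part of the package sources): the coordinates/faces pair above is
#   what GeometryBasics needs to assemble a renderable mesh, e.g. assuming `box = G4Box("b", 1, 2, 3)`:
#=
using GeometryBasics
points = GeometryBasics.coordinates(box)   # 24 vertices, 4 per face
quads = GeometryBasics.faces(box)          # 6 quadrilateral faces
mesh = GeometryBasics.Mesh(points, quads)
=#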
#---G4Trd--------------------------------------------------------------------------------------
function GeometryBasics.coordinates(trd::G4Trd, facets=6)
x1 = GetXHalfLength1(trd)
x2 = GetXHalfLength2(trd)
y1 = GetYHalfLength1(trd)
y2 = GetYHalfLength2(trd)
z = GetZHalfLength(trd)
Point3{Float64}[(-x1,-y1,-z), ( x1,-y1,-z), (-x1, y1,-z), ( x1, y1,-z),
(-x2,-y2, z), ( x2,-y2, z), (-x2, y2, z), ( x2, y2, z)]
end
function GeometryBasics.faces(::G4Trd, facets=6)
iface = (( 1,5,6,2),(3,4,8,7),( 1,3,7,5),(2,6,8,4),( 1,2,4,3),(5,6,8,7))
[QuadFace{Int64}(f...) for f in iface]
end
#---G4Para--------------------------------------------------------------------------------------
function GeometryBasics.coordinates(par::G4Para, facets=6)
x = GetXHalfLength(par)
y = GetYHalfLength(par)
z = GetZHalfLength(par)
α = GetAlpha(par)
θ = GetTheta(par)
ϕ = GetPhi(par)
ztθcϕ = z * tan(θ) * cos(ϕ)
ztθsϕ = z * tan(θ) * sin(ϕ)
ytα = y * tan(α)
Point3{Float64}[(-ztθcϕ - ytα - x, -ztθsϕ - y, -z),
(-ztθcϕ - ytα + x, -ztθsϕ - y, -z),
(-ztθcϕ + ytα - x, -ztθsϕ + y, -z),
(-ztθcϕ + ytα + x, -ztθsϕ + y, -z),
( ztθcϕ - ytα - x, ztθsϕ - y, z),
( ztθcϕ - ytα + x, ztθsϕ - y, z),
( ztθcϕ + ytα - x, ztθsϕ + y, z),
( ztθcϕ + ytα + x, ztθsϕ + y, z)]
end
function GeometryBasics.faces(::G4Para, facets=6)
iface = (( 1,5,6,2),(3,4,8,7),( 1,3,7,5),(2,6,8,4),( 1,2,4,3),(5,6,8,7))
[QuadFace{Int64}(f...) for f in iface]
end
#---G4Trap--------------------------------------------------------------------------------------
function GeometryBasics.coordinates(par::G4Trap, facets=6)
x₁ = GetXHalfLength1(par)
x₂ = GetXHalfLength2(par)
x₃ = GetXHalfLength3(par)
x₄ = GetXHalfLength4(par)
y₁ = GetYHalfLength1(par)
y₂ = GetYHalfLength2(par)
z = GetZHalfLength(par)
α₁ = GetAlpha1(par)
α₂ = GetAlpha2(par)
θ = GetTheta(par)
ϕ = GetPhi(par)
ztθcϕ = z * tan(θ) * cos(ϕ)
ztθsϕ = z * tan(θ) * sin(ϕ)
Point3{Float64}[(-ztθcϕ - y₁*tan(α₁) - x₁, -ztθsϕ - y₁, -z),
(-ztθcϕ - y₁*tan(α₁) + x₁, -ztθsϕ - y₁, -z),
(-ztθcϕ + y₁*tan(α₁) - x₂, -ztθsϕ + y₁, -z),
(-ztθcϕ + y₁*tan(α₁) + x₂, -ztθsϕ + y₁, -z),
( ztθcϕ - y₂*tan(α₂) - x₃, ztθsϕ - y₂, z),
( ztθcϕ - y₂*tan(α₂) + x₃, ztθsϕ - y₂, z),
( ztθcϕ + y₂*tan(α₂) - x₄, ztθsϕ + y₂, z),
( ztθcϕ + y₂*tan(α₂) + x₄, ztθsϕ + y₂, z)]
end
function GeometryBasics.faces(::G4Trap, facets=6)
iface = (( 1,5,6,2),(3,4,8,7),( 1,3,7,5),(2,6,8,4),( 1,2,4,3),(5,6,8,7))
[QuadFace{Int64}(f...) for f in iface]
end

#---G4Tubs--------------------------------------------------------------------------------------
function GeometryBasics.coordinates(tub::G4Tubs, facets=36)
rmin = GetInnerRadius(tub)
rmax = GetOuterRadius(tub)
z = GetZHalfLength(tub)
ϕ₀ = GetStartPhiAngle(tub)
Δϕ = GetDeltaPhiAngle(tub)
issector = Δϕ < 2π
ishollow = rmin > 0
issector ? facets = round(Int64, (facets/2π) * Δϕ) : nothing
isodd(facets) ? facets = 2 * div(facets, 2) : nothing
facets < 8 ? facets = 8 : nothing
facets = Int(facets / 2) # Number of faces
nbv = issector ? facets + 1 : facets # Number of vertices
nbc = ishollow ? nbv : 1 # Number of centers
range = 1:(2*nbv + 2*nbc)
function inner(i)
return if i <= 2*nbv
ϕ = ϕ₀ + (Δϕ * (((i + 1) ÷ 2) - 1)) / facets
up = ifelse(isodd(i), z, -z)
Point(rmax * cos(ϕ), rmax * sin(ϕ), up)
elseif ishollow
ϕ = ϕ₀ + (Δϕ * (((i - 2 * nbv + 1) ÷ 2) - 1)) / facets
up = ifelse(isodd(i), z, -z)
Point(rmin * cos(ϕ), rmin * sin(ϕ), up)
elseif i == length(range)
Point(0., 0., -z)
elseif i == length(range) - 1
Point(0., 0., z)
end
end
return (inner(i) for i in range)
end
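#---Worked example of the facet arithmetic above (illustrative): for a quarter tube (Δϕ = π/2) and
#   the default facets = 36: round(36/2π * π/2) = 9, made even -> 8, then 8 ÷ 2 = 4 side faces;
#   nbv = 5 vertex columns (a sector needs one extra) and nbc = 1 center (a solid, non-hollow tube).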
function GeometryBasics.faces(tub::G4Tubs, facets=36)
rmin = GetInnerRadius(tub)
Δϕ = GetDeltaPhiAngle(tub)
issector = Δϕ < 2π
ishollow = rmin > 0
issector ? facets = round(Int64, (facets/2π) * Δϕ) : nothing
isodd(facets) ? facets = 2 * div(facets, 2) : nothing
facets < 8 ? facets = 8 : nothing
facets = Int(facets / 2) # Number of faces
nbv = issector ? facets + 1 : facets # Number of vertices
nbc = ishollow ? nbv : 1 # Number of centers
indexes = Vector{QuadFace{Int64}}()
for j in 1:facets
a,b = 2j-1, 2j
c,d = !issector && j == facets ? (1, 2) : (2j+1, 2j+2)
push!(indexes, (a,b,d,c))
if ishollow
a′,b′ = 2j-1+2nbv, 2j+2nbv
c′,d′ = !issector && j == facets ? (2nbv+1, 2nbv+2) : (2j+1+2nbv, 2j+2+2nbv)
# inner wall
push!(indexes, (a′,b′,d′,c′))
# top
push!(indexes, (c, c′, a′, a))
# bottom
push!(indexes, (b, b′, d′, d))
else
a′,b′ = 2nbv+1, 2nbv+2
# top
push!(indexes, (a′,a, c, c))
# bottom
push!(indexes, (b′,d, b, b))
end
end
if issector
# wedge walls
a, b, c, d = ( 1, 2, 2nbv-1, 2nbv)
a′,b′,c′,d′ = ishollow ? (2nbv+1, 2nbv+2, 4nbv-1, 4nbv ) : (2nbv+1, 2nbv+2, 2nbv+1, 2nbv+2)
push!(indexes, (a, b, b′, a′))
push!(indexes, (c′, d′, d, c ))
end
return indexes
end
function GeometryBasics.normals(tub::G4Tubs, facets=36)
rmin = GetInnerRadius(tub)
ϕ₀ = GetStartPhiAngle(tub)
Δϕ = GetDeltaPhiAngle(tub)
issector = Δϕ < 2π
ishollow = rmin > 0
issector ? facets = round(Int64, (facets/2π) * Δϕ) : nothing
isodd(facets) ? facets = 2 * div(facets, 2) : nothing
facets < 8 ? facets = 8 : nothing
facets = Int(facets / 2) # Number of faces
nbv = issector ? facets + 1 : facets # Number of vertices
nbc = ishollow ? nbv : 1 # Number of centers
range = 1:(2*nbv + 2*nbc)
function inner(i)
return if i <= 2*nbv
ϕ = ϕ₀ + (Δϕ * (((i + 1) ÷ 2) - 1)) / facets
up = ifelse(isodd(i), 1/√2, -1/√2)
Vector3(cos(ϕ)/√2, sin(ϕ)/√2, up)
elseif ishollow
ϕ = ϕ₀ + (Δϕ * (((i - 2 * nbv + 1) ÷ 2) - 1)) / facets
up = ifelse(isodd(i), 1/√2, -1/√2)
Vector3(-cos(ϕ)/√2, -sin(ϕ)/√2, up)
elseif i == length(range)
Vector3(0., 0., -1.)
elseif i == length(range) - 1
Vector3(0., 0., 1.)
end
end
return (inner(i) for i in range)
end
#---CutTubs----------------------------------------------------------------------------------------
#---Compute the z of the cut plane at radius r and angle ϕ (bottom face for dz < 0, top face otherwise)
function zLimit(tub::G4CutTubs, dz, r, ϕ)
n = dz < 0 ? GetLowNorm(tub) : GetHighNorm(tub)
return dz - r * (cos(ϕ) * x(n) + sin(ϕ) * y(n)) / z(n)
end
function GeometryBasics.coordinates(tub::G4CutTubs, facets=36)
rmin = GetInnerRadius(tub)
rmax = GetOuterRadius(tub)
z = GetZHalfLength(tub)
ϕ₀ = GetStartPhiAngle(tub)
Δϕ = GetDeltaPhiAngle(tub)
issector = Δϕ < 2π
ishollow = rmin > 0
issector ? facets = round(Int64, (facets/2π) * Δϕ) : nothing
isodd(facets) ? facets = 2 * div(facets, 2) : nothing
facets < 8 ? facets = 8 : nothing
facets = Int(facets / 2) # Number of faces
nbv = issector ? facets + 1 : facets # Number of vertices
nbc = ishollow ? nbv : 1 # Number of centers
range = 1:(2*nbv + 2*nbc)
function inner(i)
return if i <= 2*nbv
ϕ = ϕ₀ + (Δϕ * (((i + 1) ÷ 2) - 1)) / facets
Point(rmax * cos(ϕ), rmax * sin(ϕ), zLimit(tub, z*(-1)^(i%2+1), rmax, ϕ))
elseif ishollow
ϕ = ϕ₀ + (Δϕ * (((i - 2 * nbv + 1) ÷ 2) - 1)) / facets
Point(rmin * cos(ϕ), rmin * sin(ϕ), zLimit(tub, z*(-1)^(i%2+1), rmin, ϕ))
elseif i == length(range)
Point(0., 0., -z)
elseif i == length(range) - 1
Point(0., 0., z)
end
end
return [inner(i) for i in range]
end
function GeometryBasics.faces(tub::G4CutTubs, facets=36)
rmin = GetInnerRadius(tub)
rmax = GetOuterRadius(tub)
z = GetZHalfLength(tub)
ϕ₀ = GetStartPhiAngle(tub)
Δϕ = GetDeltaPhiAngle(tub)
GeometryBasics.faces(G4Tubs("",rmin, rmax, z, ϕ₀, Δϕ ), facets)
end
#---G4EllipticalTube--------------------------------------------------------------------------------------
function GeometryBasics.coordinates(tub::G4EllipticalTube, facets=36)
dx = GetDx(tub)
dy = GetDy(tub)
dz = GetDz(tub)
range = 1:(2*facets + 2)
function inner(i)
return if i <= 2*facets
ϕ = (2π * (((i + 1) ÷ 2) - 1)) / facets
up = ifelse(isodd(i), dz, -dz)
Point(dx * cos(ϕ), dy * sin(ϕ), up)
elseif i == length(range)
Point(0., 0., -dz)
elseif i == length(range) - 1
Point(0., 0., dz)
end
end
return (inner(i) for i in range)
end
function GeometryBasics.faces(tub::G4EllipticalTube, facets=36)
indexes = Vector{QuadFace{Int64}}()
for j in 1:facets
a,b = 2j-1, 2j
c,d = j == facets ? (1, 2) : (2j+1, 2j+2)
push!(indexes, (a,b,d,c))
a′,b′ = 2facets+1, 2facets+2
# top
push!(indexes, (a′,a, c, c))
# bottom
push!(indexes, (b′,d, b, b))
end
return indexes
end

#----Visualization Settings------------------------------------------------------------------------
# the format is a nested Julia NamedTuple (read and evaluated directly by the program)
# Note that a tuple with a single element needs a trailing "," (e.g. `(color = :yellow,)`)
#--------------------------------------------------------------------------------------------------
(
display = (
backgroundcolor = :black, # Display background color
resolution = (1280, 720), # Display resolution
show_axis = true, # Whether to show or not the axis
),
trajectories = (
color = :yellow, # Color of the trajectories
),
detector = (
show_detector = true, # Whether to draw the detector or not
),
)
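#---Illustrative (assuming the settings file is loaded with `include`, which returns its last expression):
#=
settings = include("VisSettings.jl")
settings.display.backgroundcolor   # :black
settings.trajectories              # (color = :yellow,) (note the trailing comma)
=#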

using CxxWrap
using Geant4_jll
using Expat_jll
using Xerces_jll
#---Build the wrapper library----------------------------------------------------------------------
builddir = joinpath(@__DIR__, "build")
sourcedir = @__DIR__
cd(@__DIR__)
mkpath(builddir)
cd(builddir)
if Sys.isapple()
ENV["SDKROOT"]=readchomp(`xcrun --sdk macosx --show-sdk-path`)
end
cxxwrap_prefix = CxxWrap.prefix_path()
geant4_prefix = Geant4_jll.artifact_dir
xerces_prefix = Xerces_jll.artifact_dir
#-DXercesC_LIBRARY_RELEASE=$(Xerces_jll.libxerces)
run(`cmake -DCMAKE_BUILD_TYPE=Release
-DCMAKE_CXX_STANDARD=17
-DEXPAT_INCLUDE_DIR=$(Expat_jll.artifact_dir)/include
-DEXPAT_LIBRARY=$(Expat_jll.libexpat)
-DXercesC_INCLUDE_DIR=$(Xerces_jll.artifact_dir)/include
-DCxxWrap_VERSION=$(pkgversion(CxxWrap))
-DCMAKE_PREFIX_PATH=$cxxwrap_prefix\;$geant4_prefix\;$xerces_prefix $sourcedir`)
run(`cmake --build . --config Release --parallel 8`)

#----Callback structures---------------------------------------------------------------------------
mutable struct CallBackData{CONTEXT}
context::CONTEXT
callback::Function # (i::Int32, ctx::T)::Int32
end
struct CallBack{CONTEXT}
cfunction::CxxWrap.CxxWrapCore.SafeCFunction
cbdata::CallBackData{CONTEXT}
end
#----Global vector (GC) ---------------------------------------------------------------------------
callbacks = Vector{CallBack}()
#----Function to create a callback avoiding the use of closures (closures are not supported on ARM)
#    make_callback needs to be protected with a lock since it can be called from concurrent threads
const spinlock = Base.Threads.SpinLock()
function make_callback(ctx, fun, rt, args)
lock(spinlock)
try
i_callback = Symbol(:_internal_callback_, Symbol(typeof(ctx)))
narg = length(args)
CONTEXT = typeof(ctx)
#---create the internal callback function------------------------------------------------------
if narg == 0
eval(
quote
function $i_callback(data::Ptr{Cvoid})::$rt
cb_data = unsafe_pointer_to_objref(data)::CallBackData{$CONTEXT}
cb_data.callback(cb_data.context)
end
end
)
elseif narg == 1
eval(
quote
function $i_callback(a1::$(args[1]), data::Ptr{Cvoid})::$rt
cb_data = unsafe_pointer_to_objref(data)::CallBackData{$CONTEXT}
cb_data.callback(a1[], cb_data.context)
end
end
)
elseif narg == 2
eval(
quote
function $i_callback(a1::$(args[1]), a2::$(args[2]), data::Ptr{Cvoid})::$rt
cb_data = unsafe_pointer_to_objref(data)::CallBackData{$CONTEXT}
cb_data.callback(a1[], a2[], cb_data.context)
end
end
)
else
error("callbacks with more than 2 arguments not yet supported")
end
#---Instantiate the callback-------------------------------------------------------------------
cb = eval(
quote
CallBack{$CONTEXT}( @safe_cfunction($i_callback, $rt, ($(args...), Ptr{Cvoid})),
CallBackData{$CONTEXT}($ctx, $fun))
end
)
push!(callbacks, cb)
unlock(spinlock)
return cb
catch
unlock(spinlock)
rethrow()
end
end
function closure(cb::CallBack)
return (cb.cfunction, pointer_from_objref(cb.cbdata))
end
function null_closure(rt, args)
return (CxxWrap.CxxWrapCore.SafeCFunction(C_NULL, rt, [ args... ; Ptr{Cvoid}]), C_NULL)
end
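#---Example (illustrative sketch): creating a one-argument callback and passing it to the C++ side.
#   `MyCtx` and `count_steps` are hypothetical user definitions.
#=
mutable struct MyCtx
nsteps::Int
end
count_steps(step, ctx::MyCtx) = (ctx.nsteps += 1; nothing)
cb = make_callback(MyCtx(0), count_steps, Nothing, (CxxPtr{G4Step},))
cfunc, data = closure(cb)   # the (SafeCFunction, Ptr{Cvoid}) pair expected by the C++ wrappers
=#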

#---Exports from this section----------------------------------------------------------------------
export G4JLDetector, G4JLSimulationData, G4JLApplication, G4JLDetectorGDML, G4JLSDData, G4JLSensitiveDetector,
configure, initialize, reinitialize, beamOn, getSDdata, getSIMdata, getConstructor, getInitializer,
G4JLUniformMagField, G4JLMagneticField, G4JLFieldData, G4JLDisplay
#---Geometry usability functions-------------------------------------------------------------------
G4PVPlacement(r::Union{Nothing, G4RotationMatrix}, d::G4ThreeVector, l::Union{Nothing,G4LogicalVolume}, s::String,
p::Union{Nothing, G4LogicalVolume}, b1::Bool, n::Int, b2::Bool=false) =
G4PVPlacement(isnothing(r) ? CxxPtr{G4RotationMatrix}(C_NULL) : move!(r), d,
isnothing(l) ? CxxPtr{G4LogicalVolume}(C_NULL) : CxxPtr(l), s,
isnothing(p) ? CxxPtr{G4LogicalVolume}(C_NULL) : CxxPtr(p), b1, n, b2)
G4PVPlacement(t::G4Transform3D, l::Union{Nothing,G4LogicalVolume}, s::String,
p::Union{Nothing, G4LogicalVolume}, b1::Bool, n::Int, b2::Bool=false) =
G4PVPlacement(t, isnothing(l) ? CxxPtr{G4LogicalVolume}(C_NULL) : CxxPtr(l), s,
isnothing(p) ? CxxPtr{G4LogicalVolume}(C_NULL) : CxxPtr(p), b1, n, b2)
G4PVReplica(s::String, l::G4LogicalVolume, m::G4LogicalVolume, a::EAxis, n::Int64, t::Float64) =
G4PVReplica(s, CxxPtr(l), CxxPtr(m), a, n, t)
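#---Illustrative: the `nothing` overloads above allow a compact world placement
#   (`logical_world` is a hypothetical G4LogicalVolume):
#   world = G4PVPlacement(nothing, G4ThreeVector(), logical_world, "World", nothing, false, 0, false)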
function G4JLDetectorConstruction(f::Function)
sf = make_callback(nothing, f, CxxPtr{G4VPhysicalVolume}, ()) |> closure
G4JLDetectorConstruction(sf...)
end
function G4JLActionInitialization(f::Function)
sf = make_callback(nothing, f, Nothing, (ConstCxxPtr{G4JLActionInitialization},)) |> closure
G4JLActionInitialization(sf..., sf...) # call the construction
end
function G4LogicalVolume(solid::G4VSolid, mat::CxxPtr{G4Material}, name::String,
fldmgr=CxxPtr{G4FieldManager}(C_NULL),
sd=CxxPtr{G4VSensitiveDetector}(C_NULL),
ulimits=CxxPtr{G4UserLimits}(C_NULL),
opt=true)
G4LogicalVolume(move!(solid), mat, name,
fldmgr isa G4FieldManagerAllocated ? move!(fldmgr) : fldmgr,
sd isa CxxPtr ? sd : move!(sd),
ulimits isa G4UserLimitsAllocated ? move!(ulimits) : ulimits, opt)
end
SetUserLimits(lv::G4LogicalVolume, l::G4UserLimitsAllocated) = SetUserLimits(lv, move!(l))
#---Material friendly functions (keyword arguments)------------------------------------------------
using .SystemOfUnits:kelvin, atmosphere
G4Material(name::String; z::Float64=0., a::Float64=0., density::Float64, ncomponents::Integer=0,
state::G4State=kStateUndefined, temperature::Float64=293.15*kelvin, pressure::Float64=1*atmosphere) =
ncomponents == 0 ? G4Material(name, z, a, density, state, temperature, pressure) : G4Material(name, density, ncomponents)
AddElement(mat::G4Material, elem::CxxPtr{G4Element}; fractionmass::Float64=0., natoms::Integer=0) =
fractionmass != 0. ? AddElementByMassFraction(mat, elem, fractionmass) : AddElementByNumberOfAtoms(mat, elem, natoms)
AddMaterial(mat::G4Material, mat2::CxxPtr{G4Material}; fractionmass=1.0) = AddMaterial(mat, mat2, fractionmass)
AddMaterial(mat::G4Material, mat2::G4Material; fractionmass=1.0) = AddMaterial(mat, CxxPtr(mat2), fractionmass)
G4Isotope(name::String; z::Integer, n::Integer, a::Float64=0., mlevel::Integer=0) = G4Isotope(name, z, n, a, mlevel)
#---Better HL interface (more Julia friendly)------------------------------------------------------
abstract type G4JLAbstrcatApp end
abstract type G4JLDetector end
abstract type G4JLSimulationData end
abstract type G4JLSDData end
abstract type G4JLFieldData end
abstract type G4JLDisplay end
getConstructor(d::G4JLDetector) = error("You need to define the function Geant4.getConstructor($(typeof(d))) returning the actual construction method")
getInitializer(::G4VUserPrimaryGeneratorAction) = nothing
#---G4JLDetectorGDML----(GDML reader)--------------------------------------------------------------
struct G4JLDetectorGDML <: G4JLDetector
fPhysicalWorld::CxxPtr{G4VPhysicalVolume}
end
getConstructor(::G4JLDetectorGDML) = (det::G4JLDetectorGDML) -> det.fPhysicalWorld
"""
G4JLDetectorGDML(gdmlfile::String; check_overlap::Bool, validate_schema::Bool, init_method::Union{Function,Nothing})
Initialize a G4JLDetector from a GDML file. The GDML file is parsed at construction time.
"""
function G4JLDetectorGDML(gdmlfile::String;
check_overlap::Bool=false,
validate_schema::Bool=true,
init_method::Union{Function,Nothing}=nothing)
parser = G4GDMLParser()
!isfile(gdmlfile) && error("GDML file $gdmlfile does not exist")
SetOverlapCheck(parser, check_overlap)
Read(parser, gdmlfile, validate_schema)
world = GetWorldVolume(parser)
if !isnothing(init_method)
init_method(world)
end
G4JLDetectorGDML(world)
end
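#---Illustrative usage ("detector.gdml" is a placeholder path):
#   det = G4JLDetectorGDML("detector.gdml"; check_overlap=true)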
#---Custom Magnetic Field--------------------------------------------------------------------------
"""
Custom Magnetic Field
"""
mutable struct G4JLMagneticField{UD<:G4JLFieldData}
const name::String
const data::UD
const getfield::Function # signature (result::G4ThreeVector, position::G4ThreeVector, ::SD)
base::Vector{G4JLMagField}
end
"""
getfield(pos::G4ThreeVector, bfield::G4JLMagneticField)
Evaluate a custom magnetic field at a given position
"""
function getfield(pos::G4ThreeVector, bfield::G4JLMagneticField)
B = G4ThreeVector()
bfield.getfield(B, pos, bfield.data)
return B
end
"""
G4JLMagneticField(name::String, data::DATA; <keyword arguments>) where DATA<:G4JLGeneratorData
Create a G4JLMagneticField with its name and associated DATA structure
# Arguments
"""
function G4JLMagneticField(name::String, data::T;
getfield_method=nothing) where T<:G4JLFieldData
isnothing(getfield_method) && error("get field method not defined")
G4JLMagneticField{T}(name, data, getfield_method, G4JLMagField[])
end
#---Implementation (user friendly) Uniform Magnetic Field------------------------------------------
mutable struct G4JLUniformMagFieldData <: G4JLFieldData
field::G4ThreeVector
end
function G4JLMagneticField{G4JLUniformMagFieldData}(field::G4ThreeVector)
data = G4JLUniformMagFieldData(field)
function getfield!(field::G4ThreeVector, pos::G4ThreeVector, data::G4JLUniformMagFieldData)::Nothing
assign(field, data.field)
return
end
G4JLMagneticField("UnifiormB", data; getfield_method=getfield!)
end
const G4JLUniformMagField = G4JLMagneticField{G4JLUniformMagFieldData}
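#---Illustrative (`tesla` comes from SystemOfUnits):
#   bfield = G4JLUniformMagField(G4ThreeVector(0, 0, 1.5tesla))
#   getfield(G4ThreeVector(), bfield)   # constant field, independent of the position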
#---SensitiveDetectors-----------------------------------------------------------------------------
struct G4JLSensitiveDetector{UD<:G4JLSDData}
base::G4JLSensDet
data::UD
end
struct G4JLProtoSD{UD<:G4JLSDData}
name::String
data::UD
processhits::Union{Function, Nothing}
initialize::Union{Function, Nothing}
endofevent::Union{Function, Nothing}
end
"""
G4JLSensitiveDetector(name::String, data::DATA; <keyword arguments>) where DATA<:G4JLSDData
Initialize a G4JLSensitiveDetector with its name and associated DATA structure.
# Arguments
- `name::String`: Sensitive detector name
- `data::DATA`: data structure associated to the sensitive detector
- `processhits_method=nothing`: processHits function with signature: `(data::DATA, step::G4Step, ::G4TouchableHistory)::Bool`
- `initialize_method=nothing`: initialize function with signature: `(data::DATA, ::G4HCofThisEvent)::Nothing`
- `endofevent_method=nothing`: endOfEvent function with signature: `(data::DATA, ::G4HCofThisEvent)::Nothing`
"""
function G4JLSensitiveDetector(name::String, data::T;
processhits_method=nothing,
initialize_method=nothing,
endofevent_method=nothing) where T<:G4JLSDData
isnothing(processhits_method) && error("processHits method for ($T,G4Step,G4TouchableHistory) not defined")
G4JLProtoSD{T}(name, data, processhits_method, initialize_method, endofevent_method)
end
function G4JLSensitiveDetector(sd::G4JLProtoSD)
(;name, data, processhits, initialize, endofevent) = sd
tls_data = deepcopy(data)
cb = make_callback(tls_data, processhits, CxxBool, (CxxPtr{G4Step}, CxxPtr{G4TouchableHistory})) |> closure
base = G4JLSensDet(name, cb...)
if !isnothing(initialize)
cb = make_callback(tls_data, initialize, Nothing, (CxxPtr{G4HCofThisEvent},)) |> closure
SetInitialize(base, cb...)
end
if !isnothing(endofevent)
cb = make_callback(tls_data, endofevent, Nothing, (CxxPtr{G4HCofThisEvent},)) |> closure
SetEndOfEvent(base, cb...)
end
G4JLSensitiveDetector{typeof(data)}(base, tls_data)
end
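#---Illustrative sketch of a user-defined sensitive detector (names and types are hypothetical):
#=
mutable struct TrackerSDData <: G4JLSDData
edep::Float64
end
function process_hits(data::TrackerSDData, step::G4Step, ::G4TouchableHistory)::Bool
data.edep += GetTotalEnergyDeposit(step)
return true
end
tracker_sd = G4JLSensitiveDetector("TrackerSD", TrackerSDData(0.); processhits_method=process_hits)
=#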
struct G4JLNoData <: G4JLSimulationData
end
#--Empty Detector----------------------------------------------------------------------------------
struct G4JLEmptyDetector <: G4JLDetector end
using Geant4.PhysicalConstants: universe_mean_density
using Geant4.SystemOfUnits: g, mole, kelvin, pascal, parsec, m
function _construct(::G4JLEmptyDetector)
vacuum = G4Material("Vacuum", z=1., a=1.01g/mole, density=universe_mean_density, state=kStateGas,
temperature=2.73*kelvin, pressure=3.e-18*pascal)
G4PVPlacement(nothing, G4ThreeVector(),
G4LogicalVolume(G4Orb("World", 1m), move!(vacuum), "World"),
"World", nothing, false, 0, false)
end
Geant4.getConstructor(::G4JLEmptyDetector) = _construct
#---Geant4 Application-----------------------------------------------------------------------------
mutable struct G4JLApplication{DET<:G4JLDetector,DAT<:G4JLSimulationData} <: G4JLAbstrcatApp
const runmanager::Any
detector::DET
simdata::Vector{DAT} # Each worker thread has its own data
generator::G4JLPrimaryGenerator
field::Union{Nothing, G4Field, G4JLMagneticField}
evtdisplay::Union{Nothing, G4JLDisplay}
const nthreads::Int32
const verbose::Int32
# Types
const runmanager_type::Type{<:G4RunManager}
const builder_type::Type{<:G4VUserDetectorConstruction}
const physics_type::Type{<:G4VUserPhysicsList}
const runaction_type::Type{<:G4UserRunAction}
const eventaction_type::Type{<:G4UserEventAction}
#stackaction_type::Type{<:G4UserStakingAction}
const trackaction_type::Type{<:G4UserTrackingAction}
const stepaction_type::Type{<:G4UserSteppingAction}
# Methods
const stepaction_method::Union{Nothing,Function}
const pretrackaction_method::Union{Nothing,Function}
const posttrackaction_method::Union{Nothing,Function}
const beginrunaction_method::Union{Nothing,Function}
const endrunaction_method::Union{Nothing,Function}
const begineventaction_method::Union{Nothing,Function}
const endeventaction_method::Union{Nothing,Function}
const stackaction_method::Union{Nothing,Function}
const statechange_method::Union{Nothing, Function}
# Sensitive Detectors
protoSDs::Dict{String,G4JLProtoSD}
sdetectors::Dict{String,Vector{G4JLSensitiveDetector}} # single and MT
# Scoring Meshes
scorers::Vector{G4JLScoringMesh}
# Instances
detbuilder::Any
physics::Any
end
"""
G4JLApplication(<keyword arguments>)
Initialize a G4JLApplication with its associated types and methods.
# Arguments
- `detector::G4JLDetector`: detector description object
- `simdata=G4JLNoData()`: simulation data object
- `generator=G4JLParticleGun()`: primary particle generator
- `field=nothing`: magnetic field
- `evtdisplay=nothing`: event display (visualization)
- `nthreads=0`: number of Geant4 worker threads ( >0 implies MT)
- `verbose=0` : default verbosity level (physics, ...)
- `runmanager_type=G4RunManager`: run manager type
- `builder_type=G4JLDetectorConstruction`: detector builder type (the default should be fine in most cases)
- `physics_type=FTFP_BERT`: physics list type
- `stepaction_type=G4JLSteppingAction`: stepping action type (the default should be fine in most cases)
- `trackaction_type=G4JLTrackingAction`: tracking action type (the default should be fine in most cases)
- `runaction_type=G4JLRunAction`: run action type (the default should be fine in most cases)
- `eventaction_type=G4JLEventAction`: event action type (the default should be fine in most cases)
- `stepaction_method=nothing`: stepping action method with signature `(::G4Step, ::G4JLApplication)::Nothing`
- `pretrackaction_method=nothing`: pre-tracking action method with signature `(::G4Track, ::G4JLApplication)::Nothing`
- `posttrackaction_method=nothing`: post-tracking action method with signature `(::G4Track, ::G4JLApplication)::Nothing`
- `beginrunaction_method=nothing`: begin run action method with signature `(::G4Run, ::G4JLApplication)::Nothing`
- `endrunaction_method=nothing`: end run action method with signature `(::G4Run, ::G4JLApplication)::Nothing`
- `begineventaction_method=nothing`: begin event action method with signature `(::G4Event, ::G4JLApplication)::Nothing`
- `endeventaction_method=nothing`: end event action method with signature `(::G4Event, ::G4JLApplication)::Nothing`
- `stackaction_method=nothing`: stacking classification of new track with signature `(::G4Track, ::G4JLApplication)::G4ClassificationOfNewTrack`
- `statechange_method=nothing`: state change notification method with signature `(from::G4ApplicationState, to::G4ApplicationState, ::G4JLApplication)::Bool`
- `sdetectors::Vector{}=[]`: vector of pairs `lv::String => sd::G4JLSensitiveDetector` to associate logical volumes to sensitive detectors
- `scorers::Vector{}=[]`: vector of [`G4JLScoringMesh`](@ref)s
"""
function G4JLApplication(;
detector::G4JLDetector = G4JLEmptyDetector(),
simdata = G4JLNoData(),
generator = G4JLGunGenerator(),
field=nothing,
evtdisplay=nothing,
nthreads=0,
verbose=0,
runmanager_type=G4RunManager,
builder_type=G4JLDetectorConstruction,
physics_type=FTFP_BERT,
stepaction_type=G4JLSteppingAction,
trackaction_type=G4JLTrackingAction,
runaction_type=G4JLRunAction,
eventaction_type=G4JLEventAction,
stepaction_method=nothing,
pretrackaction_method=nothing,
posttrackaction_method=nothing,
beginrunaction_method=nothing,
endrunaction_method=nothing,
begineventaction_method=nothing,
endeventaction_method=nothing,
stackaction_method=nothing,
statechange_method=nothing,
sdetectors=[],
scorers=[],
)
# if nthreads > 0 force G4MTRunManager
runmanager_type = nthreads > 0 ? G4MTRunManager : runmanager_type
# check whether the RunManager is already constructed
runmgr = G4RunManager!GetRunManager()
if runmgr == C_NULL
runmgr = runmanager_type()
physics = nothing
else
statemgr = G4StateManager!GetStateManager()
SetNewState(statemgr, G4State_PreInit)
physics = isempty(physicslists) ? nothing : last(physicslists)
end
# instantiate G4JLApplication
this = G4JLApplication{typeof(detector), typeof(simdata)}(
runmgr, detector, [deepcopy(simdata) for i in 1:nthreads+1], generator,
field, evtdisplay, nthreads,
verbose, nthreads > 0 ? G4MTRunManager : G4RunManager, builder_type, physics_type,
runaction_type, eventaction_type, trackaction_type, stepaction_type,
stepaction_method, pretrackaction_method, posttrackaction_method,
beginrunaction_method, endrunaction_method, begineventaction_method,
endeventaction_method, stackaction_method, statechange_method,
Dict(sdetectors), Dict{String,Vector{G4JLSensitiveDetector}}(),
scorers, nothing, physics)
# register state change dependent
if !isnothing(statechange_method)
sf = make_callback(this, statechange_method, CxxBool, (G4ApplicationState, G4ApplicationState)) |> closure
G4JLStateDependent(sf...)
end
if !isnothing(evtdisplay)
sf = make_callback(this, evtdisplay.stateChange, CxxBool, (G4ApplicationState, G4ApplicationState)) |> closure
G4JLStateDependent(sf...)
end
return this
end
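#---Illustrative sketch of a minimal application (units come from SystemOfUnits; the GDML path is a placeholder):
#=
app = G4JLApplication(detector = G4JLDetectorGDML("detector.gdml"),
generator = G4JLGunGenerator(particle="e-", energy=100MeV, direction=G4ThreeVector(0,0,1), position=G4ThreeVector(0,0,-1m)),
physics_type = FTFP_BERT,
nthreads = 4)
=#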
physicslists = Vector{Any}()
"""
configure(app::G4JLApplication)
Configure the Geant4 application. It sets the declared user actions, event generator, and physics list.
"""
function configure(app::G4JLApplication)
runmgr = app.runmanager
#---Set the number of threads and declare G4UserWorkerInitialization---------------------------
if app.nthreads > 0
SetNumberOfThreads(runmgr, app.nthreads)
SetUserInitialization(runmgr, move!(G4JLWorkerInitialization()))
end
#---Prepare SensitiveDetectors-----------------------------------------------------------------
for (lv,sd) in app.protoSDs
app.sdetectors[sd.name] = Vector{G4JLSensitiveDetector}(undef, app.nthreads + 1)
end
#---Prepare Primary Generators-----------------------------------------------------------------
app.generator.base = Vector{G4JLGeneratorAction}(undef, app.nthreads + 1)
#---Prepare Magnetic Field---------------------------------------------------------------------
if app.field isa G4JLMagneticField
app.field.base = Vector{G4JLMagField}(undef, app.nthreads + 1)
end
#---Detector construction----------------------------------------------------------------------
function sdandf(app::G4JLApplication)::Nothing # called by the worker thread during init------
#---Add the Sensitive Detectors now that the geometry is constructed-----------------------
tid = G4Threading!G4GetThreadId()
tid < 0 && (tid = -1) # master thread (G4 returns -2 when built without multi-threading support)
for (lv,protosd) in app.protoSDs
sd = G4JLSensitiveDetector(protosd)
app.sdetectors[protosd.name][tid+2] = sd
if lv[end] == '+'
lv = lv[1:end-1]
multi = true
else
multi = false
end
SetSensitiveDetector(app.detbuilder, lv, CxxPtr(sd.base), multi)
end
#---Add Magnetic field if needed-----------------------------------------------------------
fieldMgr = G4TransportationManager!GetTransportationManager() |> GetFieldManager
if app.field isa G4Field
B = Clone(app.field)
SetDetectorField(fieldMgr, B)
CreateChordFinder(fieldMgr, CxxPtr{G4MagneticField}(B))
elseif app.field isa G4JLMagneticField
sf = make_callback(app.field.data, app.field.getfield, Nothing, (CxxRef{G4ThreeVector}, ConstCxxRef{G4ThreeVector})) |> closure
B = G4JLMagField(sf...)
app.field.base[tid+2] = B
SetDetectorField(fieldMgr, CxxPtr(B))
CreateChordFinder(fieldMgr, CxxPtr(B))
end
nothing
end
det = app.detector
cb1 = make_callback(det, getConstructor(det), CxxPtr{G4VPhysicalVolume}, ()) |> closure
cb2 = make_callback(app, sdandf, Nothing, ()) |> closure
app.detbuilder = app.builder_type(cb1..., cb2...)
SetUserInitialization(runmgr, CxxPtr(app.detbuilder))
#---Physics List---------------------------------------------------------------------------
if isnothing(app.physics) # we need to construct the physics list only once
physics = app.physics_type(app.verbose)
app.physics = CxxPtr(physics)
push!(physicslists, app.physics)
SetUserInitialization(runmgr, move!(physics))
end
#---Actions--------------------------------------------------------------------------------
function build(uai::G4JLActionInitialization, app::G4JLApplication)::Nothing
if !isnothing(app.stepaction_method)
cb = make_callback(app, app.stepaction_method, Nothing, (ConstCxxPtr{G4Step},))
SetUserAction(uai, move!(app.stepaction_type(closure(cb)...)))
end
#---Tracking Action---------------------------------------------------------------------------
if !isnothing(app.pretrackaction_method)
t1 = make_callback(app, app.pretrackaction_method, Nothing, (ConstCxxPtr{G4Track},)) |> closure
else
t1 = null_closure(Nothing, (ConstCxxPtr{G4Track},))
end
if !isnothing(app.posttrackaction_method)
t2 = make_callback(app, app.posttrackaction_method, Nothing, (ConstCxxPtr{G4Track},)) |> closure
else
t2 = null_closure(Nothing, (ConstCxxPtr{G4Track},))
end
if !isnothing(app.pretrackaction_method) || !isnothing(app.posttrackaction_method)
SetUserAction(uai, move!(app.trackaction_type(t1..., t2...)))
end
#---Run Action---------------------------------------------------------------------------
if !isnothing(app.beginrunaction_method)
r1 = make_callback(app, app.beginrunaction_method, Nothing, (ConstCxxPtr{G4Run},)) |> closure
else
r1 = null_closure(Nothing, (ConstCxxPtr{G4Run},))
end
if !isnothing(app.endrunaction_method)
r2 = make_callback(app, app.endrunaction_method, Nothing, (ConstCxxPtr{G4Run},)) |> closure
else
r2 = null_closure(Nothing, (ConstCxxPtr{G4Run},))
end
if !isnothing(app.beginrunaction_method) || !isnothing(app.endrunaction_method)
SetUserAction(uai, move!(app.runaction_type(r1..., r2...)))
end
#---Event Action---------------------------------------------------------------------------
if !isnothing(app.begineventaction_method)
e1 = make_callback(app, app.begineventaction_method, Nothing, (ConstCxxPtr{G4Event},)) |> closure
else
e1 = null_closure(Nothing, (ConstCxxPtr{G4Event},))
end
if !isnothing(app.endeventaction_method)
e2 = make_callback(app, app.endeventaction_method, Nothing, (ConstCxxPtr{G4Event},)) |> closure
else
e2 = null_closure(Nothing, (ConstCxxPtr{G4Event},))
end
if !isnothing(app.begineventaction_method) || !isnothing(app.endeventaction_method)
SetUserAction(uai, move!(app.eventaction_type(e1..., e2...)))
end
#---Stacking action------------------------------------------------------------------------
if !isnothing(app.stackaction_method)
cd = make_callback(app, app.stackaction_method, G4ClassificationOfNewTrack, (ConstCxxPtr{G4Track},)) |> closure
SetUserAction(uai, move!(G4JLStackingAction(cd...)))
end
#---Primary particles generator---------(per thread)--------------------------------------
gen = app.generator
tid = G4Threading!G4GetThreadId()
tid < 0 && (tid = -1) # master thread (G4 returns -2 when built without multi-threading support)
g1 = make_callback(gen.data, gen.gen_method, Nothing, (CxxPtr{G4Event},)) |> closure
gen.base[tid+2] = G4JLGeneratorAction(g1...)
init_method = gen.init_method
!isnothing(init_method) && init_method(gen.data, app)
SetUserAction(uai, CxxPtr(gen.base[tid+2]))
end
function master_build(uai::G4JLActionInitialization, app::G4JLApplication)::Nothing
#---Run Action---------------------------------------------------------------------------
if !isnothing(app.beginrunaction_method)
r1 = make_callback(app, app.beginrunaction_method, Nothing, (ConstCxxPtr{G4Run},)) |> closure
else
r1 = null_closure(Nothing, (ConstCxxPtr{G4Run},))
end
if !isnothing(app.endrunaction_method)
r2 = make_callback(app, app.endrunaction_method, Nothing, (ConstCxxPtr{G4Run},)) |> closure
else
r2 = null_closure(Nothing, (ConstCxxPtr{G4Run},))
end
if !isnothing(app.beginrunaction_method) || !isnothing(app.endrunaction_method)
SetUserAction(uai, move!(app.runaction_type(r1..., r2...)))
end
end
#---User Actions Initialization------------------------------------------------------------
cb1 = make_callback(app, build, Nothing, (ConstCxxPtr{G4JLActionInitialization},)) |> closure
cb2 = make_callback(app, master_build, Nothing, (ConstCxxPtr{G4JLActionInitialization},)) |> closure
ai = G4JLActionInitialization(cb1..., cb2...)
SetUserInitialization(runmgr, move!(ai))
#---Setup Scoring--------------------------------------------------------------------------
if !isempty(app.scorers)
G4ScoringManager!GetScoringManager()
end
end
"""
initialize(app::G4JLApplication)
Initialize the Geant4 application. It initializes the RunManager, which constructs the detector geometry, and sets
the declared sensitive detectors.
"""
function initialize(app::G4JLApplication)
Initialize(app.runmanager)
#---Process scorers------------------------------------------------------------------------
for sc in app.scorers
uicmd = toUIstring(sc)
uimgr = G4UImanager!GetUIpointer()
for s = eachsplit(uicmd,'\n')
ApplyCommand(uimgr, String(s)) == 0 || error("Got an error processing UI command '$s'")
end
ApplyCommand(uimgr, "/score/close")
ApplyCommand(uimgr, "/score/list")
end
#---Initialize Event Display---------------------------------------------------------------
if ! isnothing(app.evtdisplay)
app.evtdisplay.initDisplay(app.evtdisplay)
end
end
"""
reinitialize(app::G4JLApplication, det::G4JLDetector)
Re-initialize the Geant4 application with a new detector definition.
"""
function reinitialize(app::G4JLApplication, det::G4JLDetector)
app.detector = det
runmgr = app.runmanager
cb1 = make_callback(det, getConstructor(det), CxxPtr{G4VPhysicalVolume}, ()) |> closure
SetUserInitialization(runmgr, move!(app.builder_type(cb1...)))
ReinitializeGeometry(runmgr)
Initialize(runmgr)
#---Initialize Event Display---------------------------------------------------------------
if ! isnothing(app.evtdisplay)
app.evtdisplay.initDisplay(app.evtdisplay)
end
end
"""
beamOn(app::G4JLApplication, nevents::Int)
Start a new run with `nevents` events.
"""
function beamOn(app::G4JLApplication, nevents::Int)
statemgr = G4StateManager!GetStateManager()
in_state = GetCurrentState(statemgr)[]
try
if app.nthreads > 0
# before starting the run (creation of worker threads) we need to enter GC safe state not
# to block any garbage collection.
state = ccall(:jl_gc_safe_enter,Cint,())
BeamOn(app.runmanager, nevents)
ccall(:jl_gc_safe_leave,Cint,(Cint,), state)
else
BeamOn(app.runmanager, nevents)
end
catch
SetNewState(statemgr, in_state)
rethrow()
end
return
end
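#---Typical driver sequence (illustrative): configure(app); initialize(app); beamOn(app, 1000)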
"""
getSDdata(app::G4JLApplication, name::String)
Get the data associated to the Sensitive Detector with a given name, taking into account the current worker thread ID
"""
function getSDdata(app, name)
tid = G4Threading!G4GetThreadId()
tid < 0 && (tid = -1) # master thread (G4 returns -2 when built without multi-threading support)
app.sdetectors[name][tid+2].data
end
"""
getSIMdata(app::G4JLApplication)
Get the Simulation Data taking into account the current worker thread ID
"""
function getSIMdata(app)
tid = G4Threading!G4GetThreadId()
tid < 0 && (tid = -1) # master thread (G4 returns -2 when built without multi-threading support)
app.simdata[tid+2]
end
"""
GetWorldVolume()
Get the world volume of the currently instantiated detector geometry.
"""
GetWorldVolume() = GetWorldVolume(GetNavigatorForTracking(G4TransportationManager!GetTransportationManager()))[]
"""
GetVolume(name::String)
Get the G4LogicalVolume with this name.
"""
GetVolume(name::String) = GetVolume(G4LogicalVolumeStore!GetInstance(), name)

#---Exports from this section----------------------------------------------------------------------
export G4JLGunGenerator, G4JLGeneralParticleSource, G4JLPrimaryGenerator, G4JLGeneratorData
#---Abstract Generator Data (Parameters)------------------------------------------------------------
abstract type G4JLGeneratorData end
#---Primary Particle Generator----------------------------------------------------------------------
mutable struct G4JLPrimaryGenerator{UD<:G4JLGeneratorData}
const name::String
const data::UD
const init_method::Function # (::UD, ::G4JLApplication)::Nothing
const gen_method::Function # (::CxxPtr{G4Event}, ::UD)::Nothing
base::Vector{G4JLGeneratorAction}
end
"""
G4JLPrimaryGenerator(name::String, data::DATA; <keyword arguments>) where DATA<:G4JLGeneratorData
Creatre a G4JLPrimaryGenerator with its name and associated DATA structure
# Arguments
"""
function G4JLPrimaryGenerator(name::String, data::T;
init_method=nothing,
generate_method=nothing) where T<:G4JLGeneratorData
isnothing(generate_method) && error("primary particle generator method not defined")
G4JLPrimaryGenerator{T}(name, data, init_method, generate_method, G4JLGeneratorAction[])
end
#--------------------------------------------------------------------------------------------------
#---Implementation (user friendly) Particle Gun----------------------------------------------------
#--------------------------------------------------------------------------------------------------
mutable struct G4JLParticleGunData <: G4JLGeneratorData
gun::Union{Nothing, CxxPtr{G4ParticleGun}}
particle::String
direction::G4ThreeVector
position::G4ThreeVector
energy::Float64
end
function G4JLPrimaryGenerator{G4JLParticleGunData}(;particle="e-", energy=10., direction=G4ThreeVector(), position=G4ThreeVector())
data = G4JLParticleGunData(nothing, particle, direction, position, energy)
function init(data::G4JLParticleGunData, ::Any)
pg = data.gun = move!(G4ParticleGun())
SetParticleByName(pg, data.particle)
SetParticleEnergy(pg, data.energy)
SetParticleMomentumDirection(pg, data.direction)
SetParticlePosition(pg, data.position)
end
function gen(evt::G4Event, data::G4JLParticleGunData)::Nothing
GeneratePrimaryVertex(data.gun, CxxPtr(evt))
end
G4JLPrimaryGenerator("ParticleGun", data; init_method=init, generate_method=gen)
end
const G4JLGunGenerator = G4JLPrimaryGenerator{G4JLParticleGunData}
function SetParticleByName(gen::G4JLGunGenerator, particle::String)
gen.data.particle=particle
SetParticleByName(gen.data.gun, particle)
end
function SetParticleEnergy(gen::G4JLGunGenerator, energy::Float64)
gen.data.energy=energy
SetParticleEnergy(gen.data.gun, energy)
end
function SetParticleMomentumDirection(gen::G4JLGunGenerator, direction::G4ThreeVector)
gen.data.direction=direction
SetParticleMomentumDirection(gen.data.gun, direction)
end
function SetParticlePosition(gen::G4JLGunGenerator, position::G4ThreeVector)
gen.data.position=position
SetParticlePosition(gen.data.gun, position)
end
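#---Illustrative: the setters above keep the Julia-side cache and the underlying G4ParticleGun in sync
#   (after the generator has been initialized by the application):
#   gun = G4JLGunGenerator(particle="proton", energy=10MeV)
#   SetParticleEnergy(gun, 3GeV)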
#--------------------------------------------------------------------------------------------------
#---Implementation (user friendly) General Particle Source-----------------------------------------
#--------------------------------------------------------------------------------------------------
mutable struct G4JLGPSData <: G4JLGeneratorData
gps::Union{Nothing, CxxPtr{G4GeneralParticleSource}}
sources::Vector{<:NamedTuple}
multiplevertex::Bool
flatsampling::Bool
end
function G4JLPrimaryGenerator{G4JLGPSData}(;kwargs...)
if haskey(kwargs, :sources)
data = G4JLGPSData(nothing, kwargs[:sources],
get(kwargs, :multiplevertex, false),
get(kwargs, :flatsampling, false))
else
data = G4JLGPSData(nothing, [NamedTuple(kwargs)], false, false)
end
function gen(evt::G4Event, data::G4JLGPSData)::Nothing
GeneratePrimaryVertex(data.gps, CxxPtr(evt))
end
G4JLPrimaryGenerator("GPS", data; init_method=initGPS, generate_method=gen)
end
const G4JLGeneralParticleSource = G4JLPrimaryGenerator{G4JLGPSData}
function initGPS(data::G4JLGPSData, ::Any)
gps = data.gps = move!(G4GeneralParticleSource())
for (idx, source) in enumerate(data.sources)
if idx != 1
AddaSource(gps, 1.)
end
current = gps |> GetCurrentSource
#---First level (:intensity, :position, :particle, :direction, :energy)
for attr in fieldnames(typeof(source))
if attr == :intensity
SetCurrentSourceIntensity(gps, source.intensity)
elseif attr == :position
pos = current |> GetPosDist
SetPosDisType(pos, "Point")
SetCentreCoords(pos, source.position)
elseif attr == :direction
ang = current |> GetAngDist
SetAngDistType(ang, "planar")
SetParticleMomentumDirection(ang, source.direction)
elseif attr == :energy
ene = current |> GetEneDist
SetMonoEnergy(ene, source.energy)
elseif attr == :particle
SetParticleDefinition(current, source.particle |> FindParticle)
elseif attr == :pos
pos = current |> GetPosDist
for sattr in fieldnames(typeof(source.pos))
if sattr == :type
SetPosDisType(pos, source.pos.type)
elseif sattr == :shape
SetPosDisShape(pos, source.pos.shape)
elseif sattr == :centre
SetCentreCoords(pos, source.pos.centre)
elseif sattr == :rot1
SetPosRot1(pos, source.pos.rot1)
elseif sattr == :rot2
SetPosRot2(pos, source.pos.rot2)
elseif sattr == :halfx
SetHalfX(pos, source.pos.halfx)
elseif sattr == :halfy
SetHalfY(pos, source.pos.halfy)
elseif sattr == :halfz
SetHalfZ(pos, source.pos.halfz)
elseif sattr == :radius
SetRadius(pos, source.pos.radius)
elseif sattr == :inner_radius
SetRadius0(pos, source.pos.inner_radius)
elseif sattr == :sigma_r
SetBeamSigmaInR(pos, source.pos.sigma_r)
elseif sattr == :sigma_x
SetBeamSigmaInX(pos, source.pos.sigma_x)
elseif sattr == :sigma_y
SetBeamSigmaInY(pos, source.pos.sigma_y)
elseif sattr == :paralp
SetParAlpha(pos, source.pos.paralp)
elseif sattr == :parthe
SetParTheta(pos, source.pos.parthe)
elseif sattr == :parphi
SetParPhi(pos, source.pos.parphi)
elseif sattr == :confine
ConfineSourceToVolume(pos, source.pos.confine)
else
error("$sattr is not an attribute of particle position distribution")
end
end
elseif attr == :ang
ang = current |> GetAngDist
for sattr in fieldnames(typeof(source.ang))
if sattr == :type
SetAngDistType(ang, source.ang.type)
elseif sattr == :rot1
DefineAngRefAxes(ang, "angref1", source.ang.rot1)
elseif sattr == :rot2
DefineAngRefAxes(ang, "angref2", source.ang.rot2)
elseif sattr == :mintheta
SetMinTheta(ang, source.ang.mintheta)
elseif sattr == :maxtheta
SetMaxTheta(ang, source.ang.maxtheta)
elseif sattr == :minphi
SetMinPhi(ang, source.ang.minphi)
elseif sattr == :maxphi
SetMaxPhi(ang, source.ang.maxphi)
elseif sattr == :sigma_r
SetBeamSigmaInAngR(ang, source.ang.sigma_r)
elseif sattr == :sigma_x
SetBeamSigmaInAngX(ang, source.ang.sigma_x)
elseif sattr == :sigma_y
SetBeamSigmaInAngY(ang, source.ang.sigma_y)
elseif sattr == :focuspoint
SetFocusPoint(ang, source.ang.focuspoint)
elseif sattr == :user_coor
SetUseUserAngAxis(ang, source.ang.user_coor)
elseif sattr == :surfnorm
SetUserWRTSurface(ang, source.ang.surfnorm)
else
error("$sattr is not an attribute of particle angular distribution")
end
end
elseif attr == :ene
ene = current |> GetEneDist
for sattr in fieldnames(typeof(source.ene))
if sattr == :type
SetEnergyDisType(ene, source.ene.type)
elseif sattr == :min
SetEmin(ene, source.ene.min)
elseif sattr == :max
SetEmax(ene, source.ene.max)
elseif sattr == :mono
SetMonoEnergy(ene, source.ene.mono)
elseif sattr == :sigma
SetBeamSigmaInE(ene, source.ene.sigma)
elseif sattr == :alpha
SetAlpha(ene, source.ene.alpha)
elseif sattr == :temp
SetTemp(ene, source.ene.temp)
elseif sattr == :ezero
SetEzero(ene, source.ene.ezero)
elseif sattr == :gradient
SetGradient(ene, source.ene.gradient)
elseif sattr == :intercept
SetInterCept(ene, source.ene.intercept)
elseif sattr == :biasAlpha
SetBiasAlpha(ene, source.ene.biasAlpha)
elseif sattr == :calculate
Calculate(ene)
elseif sattr == :emspec
InputEnergySpectra(ene, source.ene.emspec)
elseif sattr == :diffspec
InputDifferentialSpectra(ene, source.ene.diffspec)
elseif sattr == :applyEneWeight
ApplyEnergyWeight(ene, source.ene.applyEneWeight)
else
error("$sattr is not an attribute of particle energy distribution")
end
end
else
error("$attr is not an attribute of single particle source")
end
end
end
end
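#---Illustrative GPS configuration with two sources (the keywords mirror the /gps commands listed at the end of this file):
#=
gps = G4JLGeneralParticleSource(sources = [
(particle="gamma", intensity=0.5, energy=1MeV, position=G4ThreeVector(), direction=G4ThreeVector(0,0,1)),
(particle="e-", intensity=0.5, ene=(type="Gauss", mono=2MeV, sigma=100keV))])
=#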
function reinitialize(gen::G4JLGeneralParticleSource; kwargs...)
isnothing(gen.data.gps) && error("GeneralParticleSource has not been instantiated")
data = gen.data
if haskey(kwargs, :sources)
data.sources = kwargs[:sources]
haskey(kwargs,:multiplevertex) && (data.multiplevertex = kwargs[:multiplevertex])
haskey(kwargs,:flatsampling) && (data.flatsampling = kwargs[:flatsampling])
else
data.sources = [ NamedTuple(kwargs) ]
end
initGPS(data, nothing)
end
#=
/gps/pos
type * Sets source distribution type.
shape * Sets source shape for Plan, Surface or Volume type source.
centre * Set centre coordinates of source.
rot1 * Set the 1st vector defining the rotation matrix'.
rot2 * Set the 2nd vector defining the rotation matrix'.
halfx * Set x half length of source.
halfy * Set y half length of source.
halfz * Set z half length of source.
radius * Set radius of source.
inner_radius * Set inner radius of source when required.
sigma_r * Set standard deviation in radial of the beam positional profile
sigma_x * Set standard deviation of beam positional profile in x-dir
sigma_y * Set standard deviation of beam positional profile in y-dir
paralp * Angle from y-axis of y' in Para
parthe * Polar angle through centres of z faces
parphi * Azimuth angle through centres of z faces
confine * Confine source to volume (NULL to unset).
/gps/ang
type * Sets angular source distribution type
rot1 * Sets the 1st vector for angular distribution rotation matrix
rot2 * Sets the 2nd vector for angular distribution rotation matrix
mintheta * Set minimum theta
maxtheta * Set maximum theta
minphi * Set minimum phi
maxphi * Set maximum phi
sigma_r * Set standard deviation in direction for 1D beam.
sigma_x * Set standard deviation in direction in x-direc. for 2D beam
sigma_y * Set standard deviation in direction in y-direc. for 2D beam
focuspoint * Set the focusing point for the beam
user_coor * True for using user defined angular co-ordinates
surfnorm * Makes a user-defined distribution with respect to surface normals rather than x,y,z axes.
/gps/ene
type * Sets energy distribution type
min * Sets minimum energy
max * Sets maximum energy
mono * Sets a monocromatic energy (same as gps/energy)
sigma * Sets the standard deviation for Gaussian energy dist.
alpha * Sets Alpha (index) for power-law energy dist.
temp * Sets the temperature for Brem and BBody distributions (in Kelvin)
ezero * Sets E_0 for exponential distribution (in MeV)
gradient * Sets the gradient for Lin distribution (in 1/MeV)
intercept * Sets the intercept for Lin distributions (in MeV)
biasAlpha * Sets the power-law index for the energy sampling distri. )
calculate * Calculates the distributions for Cdg and BBody
emspec * True for energy and false for momentum spectra
diffspec * True for differential and flase for integral spectra
applyEneWeight * Apply energy weight.
=#

#---Exports from this section----------------------------------------------------------------------
export G4JLScoringMesh, BoxMesh, CylinderMesh, energyDeposit, doseDeposit, nOfStep, ParticleFilter
using CSV
#---Scoring structures-----------------------------------------------------------------------------
abstract type AbstractMesh end
abstract type AbstractFilter end
struct ParticleFilter <: AbstractFilter
name::String
pname::String
end
toUIstring(f::ParticleFilter) = "/score/filter/particle $(f.name) $(f.pname)"
struct ScoringQuantity
name::String
qtype::Symbol # :energyDeposit, :cellCharge, :passageCellFlux, :doseDeposit, :nOfStep, :nOfSecondary, ....
qunit::Symbol # unit
filters::Vector{AbstractFilter}
end
energyDeposit(name, qunit="MeV"; filters = AbstractFilter[]) = ScoringQuantity(name, :energyDeposit, Symbol(qunit), filters)
doseDeposit(name, qunit="Gy"; filters = AbstractFilter[]) = ScoringQuantity(name, :doseDeposit, Symbol(qunit), filters)
nOfStep(name; filters = AbstractFilter[]) = ScoringQuantity(name, :nOfStep, Symbol(""), filters)
toUIstring(q::ScoringQuantity) = *("/score/quantity/$(q.qtype) $(q.name) $(q.qunit)", ["\n"*toUIstring(f) for f in q.filters]...)
struct BoxMesh <: AbstractMesh
dx::Float64
dy::Float64
dz::Float64
unit::Symbol
BoxMesh(dx, dy, dz, un=:mm) = new(dx, dy, dz, un)
end
toMstring(::BoxMesh) = "boxMesh"
toUIstring(m::BoxMesh) = "/score/mesh/boxSize $(m.dx) $(m.dy) $(m.dz) $(m.unit)"
struct CylinderMesh <: AbstractMesh
r::Float64
dz::Float64
unit::Symbol
end
toMstring(::CylinderMesh) = "cylinderMesh"
toUIstring(m::CylinderMesh) = "/score/mesh/cylinderSize $(m.r) $(m.dz) $(m.unit)"
struct G4JLScoringMesh{M<:AbstractMesh}
name::String
mesh::M # :box, :cylinder, :probe
bins::Tuple{Int, Int, Int} # nX, nY, nZ
translation::Union{Nothing, Tuple{Float64, Float64, Float64}}
rotation::Union{Nothing, Tuple{Float64, Float64, Float64}}
quantities::Vector{ScoringQuantity}
end
"""
G4JLScoringMesh(name, mesh; <keyword arguments>)
Create a scoring mesh to be added to the Geant4 application.
# Arguments
- `name::String`: scoring mesh name
- `mesh::AbstractMesh`: mesh instance. Either a `BoxMesh` or `CylinderMesh`
- `bins::Tuple`: tuple with number on bins in x, y, z (default 30, 30, 30)
- `translation::Tuple`: position (x,y,z) of the mesh with respect to the world volume. Default is (0,0,0).
- `rotation::Tuple`: rotation of the mesh with respect to the world volume. Default (0,0,0)
- `quantities::Vector`: vector of quanties to be scored (e.g. `energyDeposit`, `doseDeposit`, `nOfStep`)
"""
function G4JLScoringMesh(name::String, mesh::M;
bins = (30,30,30),
translation = nothing,
rotation = nothing,
quantities = ScoringQuantity[]) where M <: AbstractMesh
G4JLScoringMesh{M}(name, mesh, bins, translation, rotation, quantities)
end
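#---Illustrative: score the energy deposited by protons in a 2×2×2 m box mesh (sizes are half-lengths):
#=
sm = G4JLScoringMesh("boxMesh_1", BoxMesh(1., 1., 1., :m);
bins = (30, 30, 30),
quantities = [energyDeposit("eDep"; filters=[ParticleFilter("pFilter", "proton")])])
# after a run, results are retrieved by quantity name as (sum, sum of squares, entries) arrays:
total, total2, entries = sm.eDep
=#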
function toUIstring(sm::G4JLScoringMesh)
r = "/score/create/$(toMstring(sm.mesh)) $(sm.name)\n" * toUIstring(sm.mesh) *
"\n/score/mesh/nBin $(sm.bins[1]) $(sm.bins[2]) $(sm.bins[3])"
if !isnothing(sm.translation)
dx, dy, dz = sm.translation
r *= "\n/score/mesh/translate/xyz $dx $dy $dz mm"
end
if !isnothing(sm.rotation)
rx, ry, rz = sm.rotation
r *= "\n/score/mesh/rotate/rotateX $rx"
r *= "\n/score/mesh/rotate/rotateY $ry"
r *= "\n/score/mesh/rotate/rotateZ $ry"
end
for q in sm.quantities
r *= "\n" * toUIstring(q)
end
return r
end
function getScoringValues(name::String, quantity::String, bins::Tuple{Int, Int, Int})
fname = tempname()
sc = ApplyCommand(G4UImanager!GetUIpointer(), "/score/dumpQuantityToFile $name $quantity $fname")
sc != 0 && return
csv = CSV.File(fname; comment="#", header=["iX", "iY", "iZ", "total", "total2", "entry"])
(permutedims(reshape(csv[c], bins[3], bins[2], bins[1]),(3,2,1)) for c in (:total, :total2, :entry))
end
function Base.getproperty(sm::G4JLScoringMesh{T}, f::Symbol) where T
qname = String(f)
if any(q.name == qname for q in Base.getfield(sm, :quantities))
getScoringValues(Base.getfield(sm, :name), qname, Base.getfield(sm, :bins))
else
Base.getfield(sm, f)
end
end

#---Exports from this section----------------------------------------------------------------------
export G4ThreeVector, G4RotationMatrix, G4Transform3D
export G4Random, G4Random!getTheSeed, G4Random!setTheSeed, G4Random!getTheEngine, G4Random!setTheEngine
export G4RandFlat, G4RandBit, G4RandGamma, G4RandGauss, G4RandExponential, G4RandGeneral
export CxxPtr, ConstCxxPtr, CxxRef, ConstCxxRef, move!, preserve, @ui_cmd, StdVector
#---Useful Geant4 Typedefs-------------------------------------------------------------------------
typedef(t, n) = isdefined(Geant4, t) && eval(:(const $n = $t))
typedef(:CLHEP!HepRotation, :G4RotationMatrix)
typedef(:CLHEP!Hep3Vector, :G4ThreeVector)
typedef(:CLHEP!HepRandom, :G4Random)
typedef(:HepGeom!Transform3D, :G4Transform3D)
typedef(:CLHEP!RandFlat, :G4RandFlat)
typedef(:CLHEP!RandBit, :G4RandBit)
typedef(:CLHEP!RandGamma, :G4RandGamma)
typedef(:CLHEP!RandGaussQ, :G4RandGaussQ)
typedef(:CLHEP!RandExponential, :G4RandExponential)
typedef(:CLHEP!RandGeneral, :G4RandGeneral)
typedef(:CLHEP!HepRandom!getTheSeed, :G4Random!getTheSeed)
typedef(:CLHEP!HepRandom!setTheSeed, :G4Random!setTheSeed)
typedef(:CLHEP!HepRandom!getTheEngine, :G4Random!getTheEngine)
typedef(:CLHEP!HepRandom!setTheEngine, :G4Random!setTheEngine)
Base.show(io::IO, p::G4ThreeVector) = print(io, "G4ThreeVector($(x(p)),$(y(p)),$(z(p)))")
"""
move!(o)
Move ownership of a C++ object to the receiver. The object cannot be used anymore after this call.
"""
function move!(a)
r = CxxPtr(a)
a.cpp_object = C_NULL
return r
end
#---Keep a reference to SafeCFuntion(s) to avoid GC to remove them---------------------------------
_cfuncs = Vector{CxxWrap.SafeCFunction}()
function preserve(f::CxxWrap.SafeCFunction)
global _cfuncs
push!(_cfuncs, f)
return f
end
#---Cast operations-------------------------------------------------------------------------------
Base.convert(::Type{CxxPtr{G4VPhysicalVolume}}, o::G4PVPlacement) = CxxPtr{G4VPhysicalVolume}(CxxPtr(o))
Base.convert(::Type{CxxPtr{G4VPhysicalVolume}}, o::G4PVReplica) = CxxPtr{G4VPhysicalVolume}(CxxPtr(o))
Base.convert(::Type{G4String}, s::String) = make_G4String(s)
#---UI commands processing------------------------------------------------------------------------
macro ui_cmd(str)
ex = Expr(:block)
ex.args = [:(ApplyCommand(G4UImanager!GetUIpointer(), String($s))) for s = eachsplit(str,'\n')]
ex
end
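#---Illustrative: each line of the literal string becomes a separate UI command:
#=
@ui_cmd """
/tracking/verbose 1
/run/verbose 2
"""
=#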
#---Iteration G4ProcessVector
function Base.iterate(iter::G4ProcessVector)
Geant4.entries(iter) == 0 && return nothing
return (iter[0], 0)
end
function Base.iterate(iter::G4ProcessVector, i::Int)
i = i + 1
i >= Geant4.entries(iter) && return nothing
return (iter[i], i)
end
#---Iteration G4LogicalVolumeStore
function Base.iterate(iter::CxxPtr{G4LogicalVolumeStore})
Geant4.size(iter) == 0 && return nothing
return (GetVolume(iter, 0), 0)
end
function Base.iterate(iter::CxxPtr{G4LogicalVolumeStore}, i::Int)
i = i + 1
i >= Geant4.size(iter) && return nothing
return (GetVolume(iter, i), i)
end
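# Illustrative use (assuming the wrapped static accessor G4LogicalVolumeStore!GetInstance):
#   for lv in G4LogicalVolumeStore!GetInstance()
#       println(GetName(lv) |> String)
#   end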
#---Iteration G4TrajectoryContainer
function Base.iterate(iter::G4TrajectoryContainer)
Geant4.size(iter) == 0 && return nothing
return (iter[0], 0)
end
function Base.iterate(iter::G4TrajectoryContainer, i::Int)
i = i + 1
i >= Geant4.size(iter) && return nothing
return (iter[i], i)
end
module Geant4
using CxxWrap
using Geant4_jll
using Libdl
# Check whether the wrappers have been built locally; otherwise use the binary package Geant4_julia_jll
gendir = normpath(joinpath(@__DIR__, "../gen"))
if isdir(joinpath(gendir, "build/lib"))
include(joinpath(gendir, "jl/Geant4-export.jl"))
@wrapmodule(()->joinpath(gendir, "build/lib", "libGeant4Wrap.$(Libdl.dlext)"))
else
using Geant4_julia_jll
include(Geant4_julia_jll.Geant4_exports)
@wrapmodule(()->Geant4_julia_jll.libGeant4Wrap)
end
function __init__()
@initcxx
#---Call Wrapper init--------------------------------------------------
G4JL_init()
#---Setup [data] environment-------------------------------------------
GEANT4_DATA_DIR = Base.get(ENV, "GEANT4_DATA_DIR", Geant4_jll.data_dir)
for line in readlines(joinpath(Geant4_jll.artifact_dir, "bin/geant4.sh"))
m = match(r"export[ ]+(G4.*)=.*/(.*)$", line)
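# e.g. a line like `export G4LEDATA=$dir/G4EMLOW8.2` yields m[1] == "G4LEDATA" and m[2] == "G4EMLOW8.2"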
if !isnothing(m)
G4JL_setenv(String(m[1]), joinpath(GEANT4_DATA_DIR, m[2]))
end
end
end
include("SystemOfUnits.jl")
include("PhysicalConstants.jl")
include("G4Utils.jl")
include("G4Scoring.jl")
include("G4CallBacks.jl")
include("G4ParticleGenerators.jl")
include("G4JLInterface.jl")
#---G4Vis--------------------------------------------------------------------------------------
export draw, draw!, drawDistanceToOut, G4JLEventDisplay
drawDistanceToOut() = "Not implemented"
draw() = "Not implemented"
draw!() = "Not implemented"
G4JLEventDisplay(::Int64) = "Not implemented"
#---G4Hist-------------------------------------------------------------------------------------
export H1D, H2D
H1D() = "Not implemented" # Constructors
H2D() = "Not implemented" # Constructors
end
# From the file provided by Geant4 (simulation toolkit for HEP).
#
# The basic units are :
# millimeter
# nanosecond
# Mega electron Volt
# positron charge
# degree Kelvin
# amount of substance (mole)
# luminous intensity (candela)
# radian
# steradian
module PhysicalConstants
using Geant4.SystemOfUnits
using Geant4.SystemOfUnits: parsec
const Avogadro = 6.02214076e+23/mole
# c = 299.792458 mm/ns
# c^2 = 898.7404 (mm/ns)^2
const c_light = 2.99792458e+8 * m/s
const c_squared = c_light * c_light
# h = 4.13566e-12 MeV*ns
# hbar = 6.58212e-13 MeV*ns
# hbarc = 197.32705e-12 MeV*mm
const h_Planck = 6.62607015e-34 * joule*s
const hbar_Planck = h_Planck/2pi
const hbarc = hbar_Planck * c_light
const hbarc_squared = hbarc * hbarc
const electron_charge = - eplus # see SystemOfUnits.h
const e_squared = eplus * eplus
# amu_c2 - atomic equivalent mass unit
# - AKA, unified atomic mass unit (u)
# amu - atomic mass unit
const electron_mass_c2 = 0.510998910 * MeV
const proton_mass_c2 = 938.272013 * MeV
const neutron_mass_c2 = 939.56536 * MeV
const amu_c2 = 931.494028 * MeV
const amu = amu_c2/c_squared
# permeability of free space mu0 = 2.01334e-16 Mev*(ns*eplus)^2/mm
# permittivity of free space epsil0 = 5.52636e+10 eplus^2/(MeV*mm)
const mu0 = 4*pi*1.e-7 * henry/m
const epsilon0 = 1.0/(c_squared*mu0)
# electromagnetic coupling = 1.43996e-12 MeV*mm/(eplus^2)
const elm_coupling = e_squared/(4*pi*epsilon0)
const fine_structure_const = elm_coupling/hbarc
const classic_electr_radius = elm_coupling/electron_mass_c2
const electron_Compton_length = hbarc/electron_mass_c2
const Bohr_radius = electron_Compton_length/fine_structure_const
const alpha_rcl2 = fine_structure_const*classic_electr_radius*classic_electr_radius
const twopi_mc2_rcl2 = 2pi*electron_mass_c2*classic_electr_radius*classic_electr_radius
const Bohr_magneton = (eplus*hbarc*c_light)/(2*electron_mass_c2)
const nuclear_magneton = (eplus*hbarc*c_light)/(2*proton_mass_c2)
const k_Boltzmann = 8.617333e-11 * MeV/kelvin
const STP_Temperature = 273.15*kelvin
const STP_Pressure = 1.0*atmosphere
const kGasThreshold = 10.0*mg/cm3
const universe_mean_density = 1.e-25*g/cm3
const universe_radius = 14.240e9 * parsec
#---Export the ones exported by Geant4
export alpha_rcl2, amu, amu_c2, Avogadro, Bohr_radius, c_light, c_squared, classic_electr_radius, e_squared
export electron_charge, electron_Compton_length, electron_mass_c2, elm_coupling, epsilon0, fine_structure_const
export h_Planck, hbar_Planck, hbarc, hbarc_squared, k_Boltzmann, kGasThreshold, mu0
export neutron_mass_c2, proton_mass_c2, STP_Pressure, STP_Temperature, twopi_mc2_rcl2
export Bohr_magneton, nuclear_magneton, universe_mean_density, universe_radius
end
#---HEP coherent system of Units, taken from CLHEP
# The basic units are :
# millimeter (millimeter)
# nanosecond (nanosecond)
# Mega electron Volt (MeV)
# positron charge (eplus)
# degree Kelvin (kelvin)
# the amount of substance (mole)
# luminous intensity (candela)
# radian (radian)
# steradian (steradian)
module SystemOfUnits
const nounit = 1.
# Length [L]
const millimeter = 1.
const millimeter2 = millimeter*millimeter
const millimeter3 = millimeter*millimeter*millimeter
const centimeter = 10*millimeter
const centimeter2 = centimeter*centimeter
const centimeter3 = centimeter*centimeter*centimeter
const meter = 1000*millimeter
const meter2 = meter*meter
const meter3 = meter*meter*meter
const kilometer = 1000*meter
const kilometer2 = kilometer*kilometer
const kilometer3 = kilometer*kilometer*kilometer
const parsec = 3.0856775807e+16*meter
const micrometer = 1.e-6 *meter
const nanometer = 1.e-9 *meter
const angstrom = 1.e-10*meter
const fermi = 1.e-15*meter
const barn = 1.e-28*meter2
const millibarn = 1.e-3 *barn
const microbarn = 1.e-6 *barn
const nanobarn = 1.e-9 *barn
const picobarn = 1.e-12*barn
# symbols
const nm = nanometer
const μm = micrometer
const mm = millimeter
const mm2 = millimeter2
const mm3 = millimeter3
const cm = centimeter
const cm2 = centimeter2
const cm3 = centimeter3
const liter = 1.e+3*cm3
const L = liter
const dL = 1.e-1*liter
const cL = 1.e-2*liter
const mL = 1.e-3*liter
const m = meter
const m2 = meter2
const m3 = meter3
const km = kilometer
const km2 = kilometer2
const km3 = kilometer3
const pc = parsec
# Angle
const radian = 1.
const milliradian = 1.e-3*radian
const degree = (pi/180.0)*radian
const steradian = 1.
# symbols
const rad = radian
const mrad = milliradian
const sr = steradian
const deg = degree
# Time [T]
const nanosecond = 1.
const second = 1.e+9 *nanosecond
const millisecond = 1.e-3 *second
const microsecond = 1.e-6 *second
const picosecond = 1.e-12*second
const minute = 60*second
const hour = 60*minute
const day = 24*hour
const year = 365*day
const hertz = 1. / second
const kilohertz = 1.e+3*hertz
const megahertz = 1.e+6*hertz
# symbols
const ns = nanosecond
const s = second
const ms = millisecond
const μs = microsecond
const ps = picosecond
# Electric charge [Q]
const eplus = 1. # positron charge
const e_SI = 1.602176634e-19 # positron charge in coulomb
const coulomb = eplus/e_SI # coulomb = 6.24150 e+18 * eplus
# Energy [E]
const megaelectronvolt = 1.
const electronvolt = 1.e-6*megaelectronvolt
const kiloelectronvolt = 1.e-3*megaelectronvolt
const gigaelectronvolt = 1.e+3*megaelectronvolt
const teraelectronvolt = 1.e+6*megaelectronvolt
const petaelectronvolt = 1.e+9*megaelectronvolt
const millielectronvolt = 1.e-9*megaelectronvolt
const joule = electronvolt/e_SI # joule = 6.24150 e+12 * MeV
# symbols
const MeV = megaelectronvolt
const eV = electronvolt
const keV = kiloelectronvolt
const GeV = gigaelectronvolt
const TeV = teraelectronvolt
const PeV = petaelectronvolt
# Mass [E][T^2][L^-2]
const kilogram = joule*second*second/(meter*meter)
const gram = 1.e-3*kilogram
const milligram = 1.e-3*gram
# symbols
const kg = kilogram
const g = gram
const mg = milligram
# Power [E][T^-1]
const watt = joule/second # watt = 6.24150 e+3 * MeV/ns
# Force [E][L^-1]
const newton = joule/meter# newton = 6.24150 e+9 * MeV/mm
# Pressure [E][L^-3]
const pascal = newton/m2 # pascal = 6.24150 e+3 * MeV/mm3
const bar = 100000*pascal # bar = 6.24150 e+8 * MeV/mm3
const atmosphere = 101325*pascal # atm = 6.32420 e+8 * MeV/mm3
# Electric current [Q][T^-1]
const ampere = coulomb/second # ampere = 6.24150 e+9 * eplus/ns
const milliampere = 1.e-3*ampere
const microampere = 1.e-6*ampere
const nanoampere = 1.e-9*ampere
# Electric potential [E][Q^-1]
const megavolt = megaelectronvolt/eplus
const kilovolt = 1.e-3*megavolt
const volt = 1.e-6*megavolt
# Electric resistance [E][T][Q^-2]
const ohm = volt/ampere# ohm = 1.60217e-16*(MeV/eplus)/(eplus/ns)
# Electric capacitance [Q^2][E^-1]
const farad = coulomb/volt# farad = 6.24150e+24 * eplus/Megavolt
const millifarad = 1.e-3*farad
const microfarad = 1.e-6*farad
const nanofarad = 1.e-9*farad
const picofarad = 1.e-12*farad
# Magnetic Flux [T][E][Q^-1]
const weber = volt*second# weber = 1000*megavolt*ns
# Magnetic Field [T][E][Q^-1][L^-2]
const tesla = volt*second/meter2# tesla =0.001*megavolt*ns/mm2
const gauss = 1.e-4*tesla
const kilogauss = 1.e-1*tesla
# Inductance [T^2][E][Q^-2]
const henry = weber/ampere# henry = 1.60217e-7*MeV*(ns/eplus)**2
# Temperature
const kelvin = 1.
# Amount of substance
const mole = 1.
# Activity [T^-1]
const becquerel = 1. / second
const curie = 3.7e+10 * becquerel
const kilobecquerel = 1.e+3*becquerel
const megabecquerel = 1.e+6*becquerel
const gigabecquerel = 1.e+9*becquerel
const millicurie = 1.e-3*curie
const microcurie = 1.e-6*curie
const Bq = becquerel
const kBq = kilobecquerel
const MBq = megabecquerel
const GBq = gigabecquerel
const Ci = curie
const mCi = millicurie
const μCi = microcurie
# Absorbed dose [L^2][T^-2]
const gray = joule/kilogram
const kilogray = 1.e+3*gray
const milligray = 1.e-3*gray
const microgray = 1.e-6*gray
const picogray = 1.e-12*gray
const Gy = gray
const kGy = kilogray
const mGy = milligray
const μGy = microgray
const pGy = picogray
# Luminous intensity [I]
const candela = 1.
# Luminous flux [I]
const lumen = candela*steradian
# Illuminance [I][L^-2]
const lux = lumen/meter2
# Miscellaneous
const perCent = 0.01
const perThousand = 0.001
const perMillion = 0.000001
#---Export only the 'symbols'
export nm, μm, mm, cm, cm2, cm3, L, dL, cL, mL, m, km, rad, mrad, sr, deg, ns, s, ms, μs, ps, kg, g, mg
export MeV, GeV, keV, eV
export mole, joule, eplus, henry, atmosphere, kelvin, pascal, tesla
end
using Test
using Geant4
using Geant4.SystemOfUnits
@testset "Geant4 tests" verbose = true begin
include("testGeometry.jl")
include("testRandom.jl")
include("testExamples.jl")
end
function instantiate(env)
cmd = `julia --project=$env -e '
using Pkg
Pkg.develop(PackageSpec(path=pwd()))
Pkg.instantiate()'`
run(cmd)
end
@testset "G4Examples" begin
#---change the working directory
cd(dirname(dirname(pathof(Geant4))))
# All examples should be used for testing to ensure that the release is working correctly
instantiate("examples")
@test run(`julia --project=examples examples/basic/B1/B1.jl`, devnull, devnull).exitcode == 0
@test run(`julia --project=examples examples/basic/B1/B1vis.jl`, devnull, devnull).exitcode == 0
@test run(`julia --project=examples examples/basic/B2/B2a.jl`, devnull, devnull).exitcode == 0
@test run(`julia --project=examples examples/basic/B2/B2aVis.jl`, devnull, devnull).exitcode == 0
@test run(`julia --project=examples examples/basic/B3/B3a.jl`, devnull, devnull).exitcode == 0
@test run(`julia --project=examples examples/extended/RE03/RE03.jl`, devnull, devnull).exitcode == 0
@test run(`julia --project=examples examples/extended/GPS/GPS.jl`, devnull, devnull).exitcode == 0
@test run(`julia --project=examples examples/extended/TestEm3/TestEm3.jl`, devnull, devnull).exitcode == 0
@test run(`julia --project=examples examples/extended/TestEm3/TestEm3Vis.jl`, devnull, devnull).exitcode == 0
@test run(`julia --project=examples examples/advanced/Scintillation/Scintillation.jl`, devnull, devnull).exitcode == 0
@test run(`julia --project=examples examples/advanced/HBC30/HBC30.jl`, devnull, devnull).exitcode == 0
end
include(joinpath(@__DIR__, "../examples/basic/B1/DetectorB1.jl"))
@testset "G4Geometry" begin
# Vectors
m1 = G4ThreeVector(1, 2, 3)
m2 = G4ThreeVector(10, 20, 30)
@test x(m1) == 1.
@test y(m1) == 2.
@test z(m1) == 3.
@test m1 + m2 == G4ThreeVector(11,22,33)
@test mag(m1) == √14
@test dot(m2, m2) == mag2(m2)
# Transformations
r1 = G4RotationMatrix(0,0,0) # unit rotation
@test r1 * m1 == m1
rotateX(r1, π/3)
rotateY(r1, π/4)
rotateZ(r1, π/6)
p0 = G4ThreeVector(1,1,1)
v0 = G4ThreeVector(1,1,1)
t1 = G4Transform3D(r1, m1)
@test getRotation(t1) * p0 + getTranslation(t1) == r1 * p0 + m1
#---Solids-------------------------------------------------------------------------------------
#---Geometries---------------------------------------------------------------------------------
world = constructB1Detector(nothing)
@test world isa CxxPtr{G4VPhysicalVolume}
lworld = world |> GetLogicalVolume
@test lworld |> GetName |> String == "World"
# Basic hierarchy navigation
@test lworld |> GetNoDaughters == 1
daughter = GetDaughter(lworld, 0)
@test daughter |> GetName |> String == "Envelope"
@test daughter isa CxxPtr{G4VPhysicalVolume}
# GetVolume by its name
envelope = GetVolume("Envelope")
@test envelope == daughter |> GetLogicalVolume
@test envelope |> GetName |> String == "Envelope"
@test envelope |> GetNoDaughters == 2
# Get Solid and up-cast
solid = envelope |> GetSolid
@test solid isa CxxPtr{G4VSolid}
@test solid |> GetEntityType |> String == "G4Box"
box = CxxRef{getproperty(Main,Symbol(GetEntityType(solid)))}(solid) # upcasting to the concrete G4VSolid
@test box isa CxxRef{G4Box}
@test GetXHalfLength(box) == 100.
end
@testset "G4Random" begin
rdm = G4Random()
eng = G4Random!getTheEngine()
@test eng |> getSeed == 1
@test eng |> flat ≈ 0.4083998396197835
G4Random!setTheSeed(123)
@test G4Random!getTheSeed() == 123
@test eng |> getSeed == 123
@test eng |> flat ≈ 0.3489624013615084
end
# Geant4.jl
[Documentation](https://juliahep.github.io/Geant4.jl/dev/)
[CI](https://github.com/JuliaHEP/Geant4.jl/actions)
[Codecov](https://codecov.io/gh/JuliaHEP/Geant4.jl)
Julia bindings for the [Geant4](https://geant4.web.cern.ch) particle transportation toolkit. It is using [CxxWrap.jl](https://github.com/JuliaInterop/CxxWrap.jl) package to wrap C++ types and functions to Julia. Since the Geant4 toolkit is rather large and complex, writing the wrapper code by hand is not really an option. For this, we use the package [WrapIt](https://github.com/grasph/wrapit) that automates the generation of the wrapper code making use of the clang library.
## Installation
The Geant4.jl package does not require any special installation. Stable releases are registered in the Julia general registry, and can therefore be deployed with the standard `Pkg` Julia package manager.
```julia
julia> using Pkg
julia> Pkg.add("Geant4")
```
Examples are located in a separate repository [G4Examples](https://github.com/JuliaHEP/G4Examples.jl) to minimize dependencies since they use additional functionality such as graphics, plotting, analysis tools, etc. To use and play with the examples, the user can clone the examples repository and setup a complete Julia environment with:
```bash
$ git clone https://github.com/JuliaHEP/G4Examples.jl.git
$ julia --project=G4Examples.jl -e 'import Pkg; Pkg.instantiate()'
```
## Getting started
Import the `Geant4` module. All the wrapped Geant4 classes are exported since they are prefixed by `G4` minimizing the chances of a name clash with other Julia symbols.
```julia
julia> using Geant4
julia> runManager = G4RunManager()
**************************************************************
Geant4 version Name: geant4-11-01-patch-01 [MT] (10-February-2023)
Copyright : Geant4 Collaboration
References : NIM A 506 (2003), 250-303
: IEEE-TNS 53 (2006), 270-278
: NIM A 835 (2016), 186-225
WWW : http://geant4.org/
**************************************************************
Geant4.G4RunManagerAllocated(Ptr{Nothing} @0x00007f9fcb6f9c50)
julia> methodswith(G4RunManager, supertypes=true)
[1] convert(t::Type{G4RunManager}, x::T) where T<:G4RunManager in Geant4 at /Users/mato/.julia/packages/CxxWrap/IdOJa/src/CxxWrap.jl:676
[2] AbortEvent(arg1::Union{CxxWrap.CxxWrapCore.CxxRef{<:G4RunManager}, Union{CxxWrap.CxxWrapCore.SmartPointer{T2}, T2} where T2<:G4RunManager}) in Geant4 at /Users/mato/.julia/packages/CxxWrap/IdOJa/src/CxxWrap.jl:618
...
[94] rndmSaveThisRun(arg1::Union{CxxWrap.CxxWrapCore.CxxRef{<:G4RunManager}, Union{CxxWrap.CxxWrapCore.SmartPointer{T2}, T2} where T2<:G4RunManager}) in Geant4 at /Users/mato/.julia/packages/CxxWrap/IdOJa/src/CxxWrap.jl:618
julia> v = GetVersionString(runManager)
ConstCxxRef{G4String}(Ptr{G4String} @0x00007ffed34df2d8)
julia> String(v)
" Geant4 version Name: geant4-11-01-patch-01 [MT] (10-February-2023)"
```
Note that class methods are called with the object instance as first argument. In C++ the `GetVersionString` method would be called as `runManager->GetVersionString()` while in Julia it is called as `GetVersionString(runManager)`. Thanks to the Julia multi-dispatch we do not need to prefix the methods with the module name `Geant4.GetVersionString(runManager)`, even for very common function names such as `mag`.
```julia
julia> v = G4ThreeVector(1,2,3)
Geant4.CLHEP!Hep3VectorAllocated(Ptr{Nothing} @0x00007f9fcaf2a710)
julia> mag(v)
3.7416573867739413
```
The Geant4 system of units and physical constants are in separate sub-modules. You can import them with
```julia
using Geant4.SystemOfUnits
using Geant4.PhysicalConstants
```
Only some basic [units](https://github.com/JuliaHEP/Geant4.jl/blob/32f2f0bf9b556ce4cc7a171b1336916da1d648c9/src/SystemOfUnits.jl#L231) are always exported. If you need additional ones, you can do for example:
```julia
import Geant4.SystemOfUnits: tesla, candela
```
## Running the tests
To run the tests execute `julia --project=. test/runtests.jl`
## Running the examples
For the time being there are only some basic examples, plus a few more that illustrate specific features. Place yourself in the checked-out `G4Examples.jl` directory.
### basic/B1
This is the most basic example, using an interface closer to the native Geant4 one.
To run it, execute `julia --project=. basic/B1/B1.jl` or execute the notebook `B1.ipynb`
### basic/B2a
Basic example using a sensitive detector to collect 'hits'.
To run it, execute `julia --project=. basic/B2/B2a.jl`
### extended/RE03
Example using the Geant4 built-in scoring mechanism.
To run it, execute `julia --project=. extended/RE03/RE03.jl`
### TestEm3
This example comes from *extended/electromagnetic/TestEm3* example. Since it requires additional packages such as FHist and Plots it has its own Julia environment in the folder `examples/TestEm3`. It is based on user actions.
To run it, execute `julia --project=. -i extended/TestEm3/TestEm3.jl`
### WaterPhantom
Example in a notebook format similar to RE03 but with different primary particle generator (MedicalBeam) and using the scoring mechanism. Plots are produced after each run.
### HBC30
Example, available as a script and as a notebook, of a bubble chamber in which we display the particle tracks for an event that passes the trigger.
To run it, execute `julia --project=. advanced/HBC30/HBC30.jl`
## Building the wrapper code
We use the Geant4 libraries and data from the binary package [Geant4_jll](https://github.com/JuliaBinaryWrappers/Geant4_jll.jl), which has been produced with the `BinaryBuilder` [recipe](https://github.com/JuliaPackaging/Yggdrasil/tree/master/G/Geant4). The wrapper library is downloaded from the binary package [Geant4_julia_jll](https://github.com/JuliaBinaryWrappers/Geant4_julia_jll.jl).
In order to re-generate new C++ wrapper code locally, we need to have `wrapit` installed, which itself requires `libclang`. Use the following command to generate and build the wrappers locally.
```
julia --project=Geant4.jl Geant4.jl/gen/build.jl
```
Which C++ classes get wrapped is controlled by the file `gen/Geant4.wit`. See the documentation of WrapIt for more details. If the wrapper library (libGeant4Wrap.so) is found locally, it is used when loading the Geant4.jl module in preference to the registered module `Geant4_julia_jll`. This is a convenient way to test newly wrapped classes. Once the wrapper code is stabilized, we can move the generated code to the repository [Geant4_cxxwrap](https://github.com/peremato/Geant4_cxxwrap) to regenerate the binary package `Geant4_julia_jll` using the `BinaryBuilder`.
# Public API
Documentation for `Geant4.jl` public interface.
## Index - Types
```@index
Pages = ["api.md"]
Modules = [Geant4]
Order = [:type]
```
## Index - Functions
```@index
Pages = ["api.md"]
Modules = [Geant4]
Order = [:function]
```
## Types
```@autodocs
Modules = [Geant4]
Order = [:type]
```
## Functions
```@autodocs
Modules = [Geant4]
Order = [:function]
```
# Geant4.jl
Julia bindings for the [Geant4](https://geant4.web.cern.ch) particle transportation toolkit. It is using [CxxWrap.jl](https://github.com/JuliaInterop/CxxWrap.jl) package to wrap C++ types and functions to Julia. Since the Geant4 toolkit is rather large and complex, writing the wrapper code by hand is not really an option. For this we use the package [WrapIt](https://github.com/grasph/wrapit) that automates the generation of the wrapper code making use of the clang library.
Documentation of the concepts and of how to write applications with the Geant4 toolkit can be found in the [Application Developer Guide](https://geant4-userdoc.web.cern.ch/UsersGuides/ForApplicationDeveloper/html/index.html) or in the [Classes and Members reference guide](https://geant4.kek.jp/Reference/v11.1.1/index.html), which gives a detailed description of each C++ class. In this document we only highlight the differences between the Julia and the C++ API, and we document the additional types that have been added on top of the C++ classes to make the user interface more Julia friendly. To distinguish these new types from the types coming directly from C++ via the CxxWrap wrappers, they are prefixed with `G4JL`.
## Installation
The Geant4.jl package does no require any special installation. Stable releases are registered into the Julia general registry, and therefore can be deployed with the standard `Pkg` Julia package manager.
```julia
julia> using Pkg
julia> Pkg.add("Geant4")
```
Examples are located in a separate repository [G4Examples](https://github.com/JuliaHEP/G4Examples.jl) to minimize dependencies since they use additional functionality such as graphics, plotting, analysis tools, etc. To use and play with the examples, the user can clone the examples repository and setup a complete Julia environment with:
```sh
$ git clone https://github.com/JuliaHEP/G4Examples.jl.git
$ julia --project=G4Examples.jl -e 'import Pkg; Pkg.instantiate()'
```
## Getting started
Import the `Geant4` module. All the wrapped Geant4 classes are exported since they are prefixed by `G4` minimizing the chances of a name clash with other Julia symbols.
```julia-repl
julia> using Geant4
julia> runManager = G4RunManager()
**************************************************************
Geant4 version Name: geant4-11-01-patch-01 [MT] (10-February-2023)
Copyright : Geant4 Collaboration
References : NIM A 506 (2003), 250-303
: IEEE-TNS 53 (2006), 270-278
: NIM A 835 (2016), 186-225
WWW : http://geant4.org/
**************************************************************
Geant4.G4RunManagerAllocated(Ptr{Nothing} @0x00007f9fcb6f9c50)
julia> methodswith(G4RunManager, supertypes=true)
[1] convert(t::Type{G4RunManager}, x::T) where T<:G4RunManager in Geant4 at /Users/mato/.julia/packages/CxxWrap/IdOJa/src/CxxWrap.jl:676
[2] AbortEvent(arg1::Union{CxxWrap.CxxWrapCore.CxxRef{<:G4RunManager}, Union{CxxWrap.CxxWrapCore.SmartPointer{T2}, T2} where T2<:G4RunManager}) in Geant4 at /Users/mato/.julia/packages/CxxWrap/IdOJa/src/CxxWrap.jl:618
...
[94] rndmSaveThisRun(arg1::Union{CxxWrap.CxxWrapCore.CxxRef{<:G4RunManager}, Union{CxxWrap.CxxWrapCore.SmartPointer{T2}, T2} where T2<:G4RunManager}) in Geant4 at /Users/mato/.julia/packages/CxxWrap/IdOJa/src/CxxWrap.jl:618
julia> v = GetVersionString(runManager)
ConstCxxRef{G4String}(Ptr{G4String} @0x00007ffed34df2d8)
julia> String(v)
" Geant4 version Name: geant4-11-01-patch-01 [MT] (10-February-2023)"
```
Note that class methods are called with the object instance as first argument. In C++ the `GetVersionString` method would be called as `runManager->GetVersionString()` while in Julia it is called as `GetVersionString(runManager)`. Thanks to the Julia multi-dispatch we do not need to prefix the methods with the module name `Geant4.GetVersionString(runManager)`, even for very common function names such as `mag`.
```julia-repl
julia> v = G4ThreeVector(1,2,3)
Geant4.CLHEP!Hep3VectorAllocated(Ptr{Nothing} @0x00007f9fcaf2a710)
julia> mag(v)
3.7416573867739413
```
## Geant4 Julia interface
The main goal for defining a Geant4 application in the Julia interface is to create an instance of the [`G4JLApplication`](@ref) type, where all the needed elements for running a Geant4 application are declared, such as the detector geometry, the physics list, the primary particle generator, the type of run manager, the user actions, etc.

These are the needed elements:
- **detector**. An instance of a detector structure inheriting from the abstract type `G4JLDetector`, in which all the detector parameters are defined. The user should also provide a method specialization of `Geant4.getConstructor(::G4JLDetector)::Function` to return the Julia function that the toolkit needs to call in order to construct the geometry and return the pointer of the 'world' physical volume. There is no default.
- **field**. An instance of the magnetic field class. The `G4JLUniformMagField(...)` function provides a uniform magnetic field. See later how to define a custom one.
- **simdata**. An instance of the simulation data structure that the program will need to collect during the simulation execution. This mutable structure needs to inherit from the abstract type `G4JLSimulationData` and is completely user defined with counters, data structures to collect the hits or doses, histograms, etc. The default is an instance of type `G4JLNoData`.
- **nthreads**. Number of worker threads to be used. The default is 0, which means serial mode. Any number > 0 will use the MT functionality of Geant4, and therefore the user would need to pay attention to the user actions that are run concurrently to avoid data races (see [Julia doc on multi-threading](https://docs.julialang.org/en/v1/manual/multi-threading/#Data-race-freedom))
- **verbose**. Verbosity level (for physics list). The default is 0.
- **physics_type**. The physics list predefined type. Default is `FTFP_BERT`.
- **generator_type**. The primary particle generator type. The default is `G4JLParticleGun`, which encapsulates a `G4ParticleGun`. The underlying `G4ParticleGun` can be obtained by calling `GetGun()`.
- **user actions**. Julia methods defining the different possible user actions (e.g. stepping action, tracking action, run action, event action). The default is no action.
- **sdetectors**. List of sensitive detectors. This is given as a `Vector` of pairs `lv::String => sd::G4JLSensitiveDetector` to associate logical volumes by name to sensitive detector instances (see next section).
- **scorers**. List of scoring meshes defined with the function [`G4JLScoringMesh`](@ref).
Once the `G4JLApplication` is instantiated (and implicitly an instance of the `G4RunManager` created), the user can control the application with the following commands:
- `configure(::G4JLApplication)`. It associates the physics list, generator and user actions to the selected `G4RunManager` instance.
- `initialize(::G4JLApplication)`. It basically calls the `Initialize()` method of the run manager and associate the declared sensitive detectors.
- `reinitialize(::G4JLApplication, ::G4JLDetector)`. It re-defines the declared detector geometry with a new instance of `G4JLDetector`.
- `beamOn(::G4JLApplication, ::Int)`. Starts a run with a given number of events.
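Putting these together, a typical driver sequence looks like the following sketch (the detector type is illustrative, the generator call is the one documented below, and `3GeV` assumes the `SystemOfUnits` sub-module is in scope):
```julia
gun = G4JLGunGenerator(particle = "proton", energy = 3GeV,
                       direction = G4ThreeVector(0,0,1), position = G4ThreeVector(0,0,-2940.0))
app = G4JLApplication(detector = MyDetector(),   # MyDetector is a user-defined G4JLDetector
                      generator = gun)
configure(app)
initialize(app)
beamOn(app, 100)    # simulate 100 events
```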
### Constructing the detector
Parameters of the detector are collected in a user defined mutable data structure inheriting from `G4JLDetector`. The user also needs to provide a Julia method for constructing the geometry. This method needs to have the signature
```julia
<User_Det_Constructor_Function>(::G4JLDetector)::CxxPtr{G4VPhysicalVolume}
```
The only argument of the function gives access to the user defined structure with all the detector parameters.
!!! note
The type `CxxPtr{G4VPhysicalVolume}` denotes a C++ pointer to the `G4VPhysicalVolume` type.
The user can use the native G4 classes for constructing the geometry, such as the different types of solids (e.g. G4Box, G4Tubs, etc.), `G4LogicalVolume`, `G4PVPlacement`, `G4PVReplica`, etc. Alternatively, the type [`G4JLDetectorGDML`](@ref) can be used to construct a detector from a GDML file.
!!! note
Note that by default constructed C++ objects from Julia would get automatically deleted by the Julia garbage collector (GC) since a `finalizer` gets installed to the wrapper classes. This is particularly a problem when constructing the geometry.
    Currently the following classes have the `finalizer` disabled in the wrapper: `G4PVPlacement`, `G4LogicalVolume`, `G4PVReplica`, `G4Material`, `G4Isotope`, `G4Element`. This means that instances of them will not be deleted by Julia, to avoid a double deletion (often a crash) when the geometry gets deleted at the finalization of the application from the C++ side.
A pointer to any of the `G4Solid` needs to be passed to the `G4LogicalVolume` using [`move!(objref)`](@ref) function to transfer the ownership of the referenced object to the C++ side. See the following example:
```julia
trackerS = G4Tubs("tracker", 0, trackerSize, trackerSize, 0, 360deg)
trackerLV = G4LogicalVolume(move!(trackerS), m_air, "Tracker")
G4PVPlacement(nothing, # no rotation
G4ThreeVector(0,0,0), # at (0,0,0)
trackerLV, # its logical volume
"Tracker", # its name
worldLV, # its mother volume
false, # no boolean operations
0, # copy number
checkOverlaps) # checking overlaps
```
### Physics List
The user can provide one of the pre-defined physics lists, such as `QGS_BIC`, `QBBC` or `FTFP_BERT`. Alternatively, the user can define a Julia `struct` as a subtype of `G4VUserPhysicsList` and modify some of the physics in the constructor. For example:
```julia
struct ScintPhysicsList <: G4VUserPhysicsList
function ScintPhysicsList(verbose)
pl = FTFP_BERT(verbose)
# replace G4EmStandardPhysics
ReplacePhysics(pl, move!(G4EmStandardPhysics_option4(verbose)))
# register G4OpticalPhysics
RegisterPhysics(pl, move!(G4OpticalPhysics(verbose)))
# activate scintillation
optpar = G4OpticalParameters!Instance()
SetProcessActivation(optpar, "Scintillation", true)
# activate cherenkov radiation
SetProcessActivation(optpar, "Cerenkov", true)
return pl
end
end
```
### Magnetic field
The user can define either a uniform magnetic field or a custom one. To define a custom one (see the sketch after this list):
- define first a user structure for the parameters that inherits from the abstract type `G4JLFieldData`
- then, define a function with the signature `(result::G4ThreeVector, position::G4ThreeVector, params::G4JLFieldData)::Nothing`
- and finally, with all this, instantiate the magnetic field calling the function
```
G4JLMagneticField(<name>, <data>; getfield_method=<function>)
```
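A minimal sketch following these steps (the structure, the function name and the field value are illustrative; `assign` is the CxxWrap setter used by the built-in uniform field, and `1.5tesla` assumes the `SystemOfUnits` sub-module is in scope):
```julia
mutable struct DipoleFieldData <: G4JLFieldData    # illustrative parameter structure
    field::Float64                                 # field strength along Y
end

function getdipolefield(result::G4ThreeVector, position::G4ThreeVector, data::DipoleFieldData)::Nothing
    assign(result, G4ThreeVector(0, data.field, 0))   # fill the result vector in place
    return
end

bfield = G4JLMagneticField("DipoleField", DipoleFieldData(1.5tesla); getfield_method=getdipolefield)
```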
### Primary Particle Generator
The user can either define a custom primary particle generator or use one of the two predefined ones:
- **G4JLGunGenerator**. Uses the `G4ParticleGun` class of Geant4, which generates a single particle type with a fixed kinetic energy, position and direction.
```julia
G4JLGunGenerator(particle = "proton",
energy = 3GeV,
direction = G4ThreeVector(0,0,1),
position = G4ThreeVector(0,0,-2940.0))
```
- **G4JLGeneralParticleSource**. Uses the `G4GeneralParticleSource` class of Geant4 to generate one or more sources of primary particles with predefined distributions for energy, position and direction. An example can be:
```julia
src1 = (particle="e+", intensity=0.5,
ene=(type="Exp", min=2MeV, max=10MeV, ezero=2.),
pos=(type="Plane", shape="Circle", centre=G4ThreeVector(1cm,2cm,0cm), radius=3cm),
ang=(type="cos", mintheta=0deg, maxtheta=180deg))
src2 = (particle="gamma", intensity=0.5,
ene=(type="Brem", min=2MeV, max=10MeV, temp=2e12),
pos=(type="Plane", shape="Ellipse", centre=G4ThreeVector(3cm,1cm,0cm), halfx=1cm, halfy=2cm),
ang=(type="iso", mintheta=0deg, maxtheta=180deg))
gps = G4JLGeneralParticleSource(sources = [ src1, src2 ])
```
the standard particle gun parameters work as well:
```julia
G4JLGeneralParticleSource(particle = "proton",
energy = 3GeV,
direction = G4ThreeVector(0,0,1),
position = G4ThreeVector(0,0,0))
```
- **Custom Generator**. It is fairly simple to write a custom generator. One needs to define a structure with the parameters to configure the generator, plus two functions: one to initialize it, and one, called for each event, to generate the primary particles. Here is an example:
```julia
# define the data structure with the generator parameters
mutable struct PlaneSourceData <: G4JLGeneratorData
particleName::String
particlePtr::CxxPtr{G4ParticleDefinition}
energy::Float64
halfx::Float64
halfy::Float64
position::G4ThreeVector
direction::G4ThreeVector
end
# define the constructor with the default parameters
function PlaneSource(;particle="gamma", energy=0.07MeV, halfx=7cm, halfy=7cm,
position=G4ThreeVector(0,0,-14.9cm), direction=G4ThreeVector(0,0,1))
data = PlaneSourceData(particle, CxxPtr{G4ParticleDefinition}(C_NULL), energy, halfx, halfy, position, direction)
function init(data:: PlaneSourceData, app::G4JLApplication)
data.particlePtr = FindParticle(data.particleName)
end
function generate( evt::G4Event, data:: PlaneSourceData)::Nothing
mass = data.particlePtr |> GetPDGMass
momentum = √((mass + data.energy)^2 - mass^2)
pvec = momentum * data.direction
pos = data.position + G4ThreeVector( data.halfx * (rand() - 0.5), data.halfy * (rand() - 0.5), 0)
primary = G4PrimaryParticle(data.particlePtr, pvec |> x, pvec |> y, pvec |> z )
vertex = G4PrimaryVertex(pos, 0ns)
SetPrimary(vertex, move!(primary)) # note that we give up ownership of the objects just created
AddPrimaryVertex(evt, move!(vertex)) # note that we give up ownership of the objects just created
end
G4JLPrimaryGenerator("PlaneSource", data; init_method=init, generate_method=generate)
end
```
### User Actions
User actions are native Julia functions that are callbacks of the Geant4 toolkit. They are declared in the constructor of `G4JLApplication`, so they do not need to be associated to a specific function name. All user actions receive a reference to the `G4JLApplication` from which the user can obtain details of the actual application, such as the current detector, the physics, the generator, or the running simulation data. These are the available attributes of the application instance:
```julia
runmanager::G4RunManager # The C++ G4RunManager instance
detector::DET # User defined detector structure with all parameters
simdata::Vector{DAT} # User defined simulation data structs (each worker has its own)
physics::G4VUserPhysicsList # Physics List
generator::G4JLPrimaryGenerator # Primary particle generator
field::Union{G4Field, G4JLMagneticField} # Magnetic field instance
evtdisplay::G4JLDisplay # Event display instance
nthreads::Int32 # number of worker threads
verbose::Int32 # verbosity level for physics lists
sdetectors::Dict{String,Vector{G4JLSensitiveDetector}} # dictionary of sensitive detectors
scorers::Vector{G4JLScoringMesh} # vector of scoring meshes
```
The following are the currently defined possible user actions:
- **stepping action**. Called on each simulation step. The signature is `(::G4Step, ::G4JLApplication)::Nothing`. Consult the [G4Step](https://geant4.kek.jp/Reference/v11.1.1/classG4Step.html) reference manual to see what you can get from it.
- **pre-tracking action**. Called at the creation of a new particle being tracked. The signature is `(::G4Track, ::G4JLApplication)::Nothing`. Consult the [G4Track](https://geant4.kek.jp/Reference/v11.1.1/classG4Track.html) reference manual to see what you can get from it.
- **post-tracking action**. Called at the end of the particle being tracked. The signature is `(::G4Track, ::G4JLApplication)::Nothing`. Consult the [G4Track](https://geant4.kek.jp/Reference/v11.1.1/classG4Track.html) reference manual to see what you can get from it.
- **begin-event action**. Called at the beginning of each event. The signature is `(::G4Event, ::G4JLApplication)::Nothing`. Consult the [G4Event](https://geant4.kek.jp/Reference/v11.1.1/classG4Event.html) reference manual to see what you can get from it.
- **end-event action**. Called at the end of each event. The signature is `(::G4Event, ::G4JLApplication)::Nothing`. Consult the [G4Event](https://geant4.kek.jp/Reference/v11.1.1/classG4Event.html) reference manual to see what you can get from it.
- **begin-run action**. Called at the beginning of a run. The signature is `(::G4Run, ::G4JLApplication)::Nothing`. Consult the [G4Run](https://geant4.kek.jp/Reference/v11.1.1/classG4Run.html) reference manual to see what you can get from it.
- **end-run action**. Called at the end of a run. The signature is `(::G4Run, ::G4JLApplication)::Nothing`. Consult the [G4Run](https://geant4.kek.jp/Reference/v11.1.1/classG4Run.html) reference manual to see what you can get from it.
### Sensitive Detectors
The user can define sensitive detectors by defining a data structure and 3 callback functions, which will initialize, fill and dispose the defined data structure. The instantiated sensitive detector is then associated to one or more logical volumes of the detector setup. Instantiating a `G4JLSensitiveDetector` requires the following arguments:
- **name**. A string to identify the sensitive detector. No default.
- **sd data**. A instance of a user defined `G4JLSDData` mutable data structure that will passed to each callback invocation.
- **initialize method**. User method that is called at the beginning of each event. The signature is `(::B2aSDData, ::G4HCofThisEvent)::Nothing`.
- **endOfEvent method**. User method that is called at the end of each event. The signature is `(::B2aSDData, ::G4HCofThisEvent)::Nothing`.
- **processHits method**. User method that is called at each simulation step that ends in the associated logical volume. The signature is `(::B2aSDData, ::G4Step, ::G4TouchableHistory)::Bool`. Consult the [G4Step](https://geant4.kek.jp/Reference/v11.1.1/classG4Step.html) reference manual to see what you can get from the G4Step. It returns true if a true hit is generated.
The following is an example of how to define a sensitive detector:
```julia
#--------------------------------------------------------------------------------------------------
#---Define Crystal Sensitive Detector--------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
#---SD collected data------------------------------------------------------------------------------
struct CrystalSDData <: G4JLSDData
hitcollection::Vector{Hit} # define a hit collection
CrystalSDData() = new(Hit[])
end
#---Initialize method------------------------------------------------------------------------------
function crystal_initialize(::G4HCofThisEvent, data::CrystalSDData)::Nothing
empty!(data.hitcollection) # empty the hit collection at every event
return
end
#---Process Hit method-----------------------------------------------------------------------------
function crystal_processHits(step::G4Step, ::G4TouchableHistory, data::CrystalSDData)::Bool
part = step |> GetTrack |> GetParticleDefinition
part == G4OpticalPhoton && return false
edep = step |> GetTotalEnergyDeposit
edep < 0. && return false
pos = step |> GetPostStepPoint |> GetPosition
push!(data.hitcollection, Hit(0., pos, edep, ScintCryst)) # fill the collection with a new Hit
return true
end
#---Create SD instance-----------------------------------------------------------------------------
G4JLSensitiveDetector("Crystal_SD", CrystalSDData(); # name an associated data are mandatory
processhits_method=crystal_processHits, # process hits method (also mandatory)
initialize_method=crystal_initialize)   # initialize method
```
### Scoring meshes
The user can also specify scoring meshes to obtain quantities on a defined grid. In Geant4 this is achieved using a set of UI commands; in this Julia interface, the functionality has been encapsulated in a number of data structures. The function to create a scoring mesh is [`G4JLScoringMesh`](@ref), and it receives as arguments the type and dimensions of the mesh, the position, the rotation, the number of bins in each dimension, and the quantities to accumulate, possibly with some filter conditions. See for example the scoring mesh from RE03:
```julia
sc1 = G4JLScoringMesh("boxMesh_1",
BoxMesh(1m,1m,1m),
bins = (30, 30, 30),
quantities = [ energyDeposit("eDep")
nOfStep("nOfStepGamma", filters=[ParticleFilter("gammafilter", "gamma")])
nOfStep("nOfStepEMinus", filters=[ParticleFilter("eMinusFilter", "e-")])
nOfStep("nOfStepEPlus", filters=[ParticleFilter("ePlusFilter", "e+")])
]
)
```
The scoring mesh is added to the `scorers` argument when constructing a [`G4JLApplication`](@ref). After a run has been executed, the user can obtain the quantity values (sum, sum2, entries) on the 3D grid simply by accessing the quantity as an attribute of the scoring mesh. The returned 3D Julia array is shaped to the declared bins.
```julia-repl
julia> beamOn(app,10000)
julia> sum, sum2, entries = sc1.eDep
julia> typeof(sum)
Array{Float64, 3}
julia> typeof(entries)
Array{Int64, 3}
julia> size(sum)
(30, 30, 30)
```
### Detector and Event Display
For visualization applications, the user can create an instance of `G4JLEventDisplay([settings file])` and give it to the constructor of `G4JLApplication` in the `evtdisplay` attribute. The constructor accepts a visualization settings file that overrides the default settings in the file [Geant4.jl/ext/G4Vis/settings.jl](https://github.com/JuliaHEP/Geant4.jl/blob/master/ext/G4Vis/settings.jl). The settings format is a Julia `NamedTuple`. Here is an example:
```julia
(
display = (
show_axis = false,
),
trajectories = (
color = :yellow,
),
)
```
## Examples
### basic/B1
This is the most basic example. For this example we have kept the interface closer to the native C++ interface. The only difference with respect to C++ is that we need to instantiate a `G4JLDetectorConstruction` with, as argument, the Julia function that will be called back to construct the detector geometry. This is because we cannot inherit from Julia the C++ class `G4VUserDetectorConstruction`, which is the way foreseen in the native Geant4 toolkit to provide the specific user detector geometry. Similarly, for the user actions and primary particle generator we need to instantiate a `G4JLActionInitialization`. The interaction with the application is done with the `G4UImanager`.
To run it execute
```
julia --project=examples examples/basic/B1/B1.jl
```
or execute the notebook `examples/basic/B1/B1.ipynb`
### basic/B2a
This example fills a vector of `TrackerHit` that is stored in the `B2aSDData` simulation data structure for each event. This is achieved with a sensitive detector associated to the `Chamber` logical volume. The example is using the high-level Julia interface with the instantiation of a `G4JLApplication` declaring all the elements of the application (geometry, physics, simulation data, user actions, etc.)
To run it execute
```
julia --project=examples examples/basic/B2/B2a.jl
```
### extended/RE03
This example makes use of the built-in scoring capability of Geant4 with a new Julia API, creating an instance of [`G4JLScoringMesh`](@ref) instead of using the native Geant4 UI. The user defines a scoring mesh and the quantities to be collected, and gets the results after the run.
To run it execute
```
julia --project=examples examples/extended/RE03/RE03.jl
```
### WaterPhantom
This example is similar to RE03 but it defines a custom primary particle generator (`MedicalBeam`) instead of using the predefined particle gun generator (`G4JLGunGenerator`). It is a notebook and produces plots after each run.
To run it execute
```
jupyter notebook examples/WaterPhantom/WaterPhantom.ipynb
```
See the [rendered notebook](https://juliahep.github.io/Geant4.jl/dev/notebooks/WaterPhantom).
### TestEm3
This example comes from *extended/electromagnetic/TestEm3* example. Since it requires additional packages such as `FHist` for histograms and `Plots` for their visualization, it has its own Julia environment in the folder `examples/TestEm3`. It uses the Julia high-level interface with the instantiation of a `G4JLApplication` declaring all the elements of the application.
To run it, execute
```
julia --project=examples -i examples/TestEm3/TestEm3.jl
```
### Scintillator
Example with optical photons and customized physics list, as well as with a couple of sensitive detectors (for the crystal and silicon) and some simple analysis of the results.
To run it, execute
```bash
julia --project=examples -i examples/Scintillator/Scintillator.jl
```
## Visualization examples
The Geant4.jl project includes additional functionality for visualization under the extension directory `ext/G4Vis/examples`. This is done in a different directory to separate and minimise the dependencies. The julia `ext/G4Vis/examples/Project.toml` file has the complete list of dependencies needed for running these examples. In order to load all the required dependencies the user can execute the first time:
```
julia --project=Geant4.jl/ext/G4Vis/examples -e 'import Pkg; Pkg.instantiate()'
```
!!! note
Note that depending on the actual platform and the desired interactivity, the user may need to choose a different `Makie.jl` backend among the existing ones (`GLMakie`, `CairoMakie`, `WGLMakie`,...).
### B1vis.jl
This example uses the `GLMakie` backend (OpenGL) of Makie. The user may change to another backend depending on the setup. To visualize the B1 detector do:
```
julia --project=ext/G4Vis/examples -i ext/G4Vis/examples/B1vis.jl
```
!!! note
Note the option `-i` to keep the interactive session open
### B2aVis.jl
This example visualizes the detector together with a (very simplistic) visualization of one event.
```
julia --project=ext/G4Vis/examples -i ext/G4Vis/examples/B2avis.jl
```
### Solids.ipynb
This notebook shows all the possible solids in Geant4. This is work in progress and some solids do not have graphical representation yet.
```
jupyter notebook ext/G4Vis/examples/Solids.ipynb
```
See the [rendered notebook](https://juliahep.github.io/Geant4.jl/dev/notebooks/Solids/)
### HBC30
This example mimics the CERN 30cm liquid hydrogen bubble chamber. It demonstrates the use of a uniform magnetic field (`G4JLUniformMagField`). It is useful for displaying the detector and the produced particles in a customizable manner.
To run it, execute
```
julia --project=ext/G4Vis/examples -i ext/G4Vis/examples/HBC30/HBC30.jl
```
It also exists in a [notebook](https://juliahep.github.io/Geant4.jl/dev/notebooks/HBC30/) format.
## Building the wrapper code
We use the Geant4 native binary libraries and data from the binary package [Geant4\_jll](https://github.com/JuliaBinaryWrappers/Geant4_jll.jl), which has been produced with the `BinaryBuilder` [recipe](https://github.com/JuliaPackaging/Yggdrasil/tree/master/G/Geant4). The wrapper library is downloaded from the binary package [Geant4\_julia\_jll](https://github.com/JuliaBinaryWrappers/Geant4_julia_jll.jl).
We have the possibility during the development of this package to re-generate locally new C++ wrapper code. For this we need to have `wrapit` installed, which itself requires `libclang` to be installed. If the executable is not found (not in the PATH), we can use the wrapper code that is already pre-generated and distributed with this package.
- what C++ classes get wrapped is controlled by the file `gen/Geant4.wit.in`. See the documentation of WrapIt for the details.
- run the `gen/build.jl` script to generate the wrapper code (if `wrapit` is found) and build the wrapper library.
!!! note
Please note that compiling the single generated wrapper file takes very long. This is due to the current implementation of WrapIt that places all wrapped types in a single file. This may change in the future.
Once the wrapper code is stabilized we move the generated code to the repository [Geant4\_cxxwrap](https://github.com/peremato/Geant4_cxxwrap) to regenerate the binary package `Geant4_julia_jll` using the `BinaryBuilder`.
## Release Notes
### 0.1.17
- New Features
- Added wrapper for G4StepLimiterPhysics
### 0.1.15
- Fixes:
- Returned threadid is -2 for non-multithreaded builds of Geant4
### 0.1.14
- New Features:
- Added function getfield(pos::G4ThreeVector, bfield::G4JLMagneticField)
- Added G4JLEmptyDetector as default detector (for testing and tutorials)
- Built in some protection to allow re-using physics lists when re-creating a `G4JLApplication`
### 0.1.13
- New Features:
- Added wrappers for PhysicsLists classes: `G4DecayPhysics`, `G4EmStandardPhysics`, `G4RadioactiveDecayPhysics`
- Implemented user stacking action
- Added examples basic/B3a
### 0.1.12
- Moved the examples to new repository https://github.com/JuliaHEP/G4Examples.jl
- Fixes:
- Veto CLHEP::HepRandomEngine::get to avoid clash with Base.get
- Disable finalizer for class G4JLStateDependent. Fixes event display.
### 0.1.11
- Migrated to Julia 1.10
- Using the latest version of WrapIt to generate the CxxWrap wrappers
- New features
- Added `G4JLGeneralParticleSource`, which makes use of the Geant4 `G4GeneralParticleSource` class for generation of primary particles for simulations. See documentation.
- Added a new extension module `G4Hist` that defines convenient histogram types (H1D and H2D) on top of FHist.jl. See documentation.
### 0.1.10
- New features
- Provide an EventDisplay as a building block of the application. New `evtdisplay` argument in the constructor.
- Added new wrapped classes to support event displays: G4EventManager, G4TrajectoryContainer, G4VTrajectory, G4VTrajectoryPoint, G4StateManager, G4VStateDependent G4ApplicationState
### 0.1.9
- Changes needed for running ATLTileCalTB example
### 0.1.8
- New features:
- Added all example scripts as tests, so that the CI will systematically execute them.
- Added new wrapped classes: G4LogicalVolumeStore, G4MaterialPropertiesTable, G4OpticalParameters, G4OpticalSurface, G4LogicalBorderSurface, G4LogicalSkinSurface, G4VProcess, G4ProcessType, G4Random, G4EmStandardPhysics_option4, G4OpticalPhysics, G4OpBoundaryProcess, G4ProcessManager, G4ProcessVector
- Added `Geant4.PhysicalConstants` sub-module
- Added testRandom to the test suite
- Added `Scintillation` example with scintillating crystals and optical photons
- Fixes:
- Fixed broken [visualization] examples
- Improve HBC30 example to visualize trajectories in a more smooth manner. Moved it under `ext/G4Vis/examples`
### 0.1.7
- New features:
- Support for ARM64 (MacOS M1). Implemented workaround for `closures` in `@safe_cfunction`, which were not supported on this platform.
- Added support for magnetic field. Additional keyword argument in the `G4JLApplication`. Added example HBC30 for a simulation of a liquid hydrogen bubble chamber.
- Added drawing capabilities for `G4Polyhedra`, `G4EllipticalTube`, `G4Ellipsoid`
- Fixes:
- Fix example WaterPhantom to adapt to the new callback closures
### 0.1.6
- New features:
- Restructured G4Vis as an extension of Geant4.jl. It is automatically loaded when all the required weak dependencies are satisfied (e.g. "Makie", "Colors", "StaticArrays", "Rotations", "LinearAlgebra")
- Added missing basic solids including the `BooleanSolid`
- Using the `IGLWrap_jll` binary wrapper to `libigl` C++ library to draw boolean solids.
- Added notebook `ext/G4Vis/Solids.ipynb` to exercise and draw all possible solids (some are still missing)
### 0.1.5
- New features:
- Support for Multi-threading. New parameter in `G4JLApplication` to set the number of threads. Default is serial, `nthreads = 0`
- Added verbosity level with the argument `verbose` in [`G4JLApplication`](@ref)
- Fixes:
- Fix continuous pre-compilation after moving to Julia 1.9
- Fix for embedded figures in notebooks
### 0.1.4
- Added support for scoring with `G4JLScoringMesh` function
- Added generation of documentation including running the jupyter notebooks
module Baselet
include("specialized.jl")
include("utils.jl")
include("implementations.jl")
end # module
# Vendoring `Base.afoldl`
afoldl(op,a) = a
afoldl(op,a,b) = op(a,b)
afoldl(op,a,b,c...) = afoldl(op, op(a,b), c...)
function afoldl(op,a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,qs...)
y = op(op(op(op(op(op(op(op(op(op(op(op(op(op(op(a,b),c),d),e),f),g),h),i),j),k),l),m),n),o),p)
for x in qs; y = op(y,x); end
y
end
@inline getindex(x, inds...) = Base.getindex(x, inds...)
@def getindex(xs::Tuple, ind::UnitRange) = ntuple(i -> xs[ind[i]], length(ind))
@inline filter(f, xs) = Base.filter(f, xs)
@def filter(f::F, xs::Tuple) where {F} =
afoldl((ys, x) -> f(x) ? (ys..., x) : ys, (), xs...)
zip(x, xs...) = Base.zip(x, xs...)
# Not using `zip(tuples::NTuple{N,Any}...)` to avoid having unbound
# type parameter.
@def function zip(x::NTuple{N,Any}, xs::NTuple{N,Any}...) where {N}
tuples = (x, xs...)
return ntuple(i -> map(t -> t[i], tuples), N)
end
@inline flatten(itr) = Iterators.flatten(itr)
@def flatten(xs::Tuple) = _flatten(xs...)
@inline _flatten() = ()
@inline _flatten(x::Tuple, xs::Tuple...) = (x..., _flatten(xs...)...)
@inline enumerate(itr) = Base.enumerate(itr)
@def enumerate(xs::Tuple) = Specialized.zip(ntuple(identity, length(xs)), xs)
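# e.g. Specialized.enumerate((:a, :b)) === ((1, :a), (2, :b))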
struct _InitialValue end
struct BottomRF{T}
rf::T
end
@inline (op::BottomRF)(::_InitialValue, x) = Base.reduce_first(op.rf, x)
@inline (op::BottomRF)(acc, x) = op.rf(acc, x)
@inline accumulate(args...; kw...) = Base.accumulate(args...; kw...)
@def function accumulate(op::F, xs::Tuple; init = _InitialValue()) where {F}
rf = BottomRF(op)
ys, = afoldl(((), init), xs...) do (ys, acc), x
acc = rf(acc, x)
(ys..., acc), acc
end
return ys
end
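# e.g. Specialized.accumulate(+, (1, 2, 3)) === (1, 3, 6)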
@inline cumsum(args...; kw...) = Base.cumsum(args...; kw...)
@def cumsum(xs::Tuple) = Specialized.accumulate(Base.add_sum, xs)
@inline cumprod(args...; kw...) = Base.cumprod(args...; kw...)
@def cumprod(xs::Tuple) = Specialized.accumulate(Base.mul_prod, xs)
@inline foreach(f, xs, rest...) = Base.foreach(f, xs, rest...)
# Not using `@def` so that `Specialized.foreach` is always defined via `foldl`.
@inline foreach(f, xs::Tuple, rest::Tuple...) = Specialized.foreach(f, xs, rest...)
@inline Specialized.foreach(f, xs) = afoldl((_, x) -> (f(x); nothing), nothing, xs...)
@inline Specialized.foreach(f, xs, rest...) =
afoldl((_, args) -> (f(args...); nothing), nothing, zip(xs, rest...)...)
# https://github.com/JuliaLang/julia/pull/31901
@inline any(f, itr) = Base.any(f, itr)
@inline any(itr) = Base.any(itr)
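# Short-circuit on `true`, but combine with `|` otherwise so that `missing`
# propagates with the same three-valued logic as `Base.any` (likewise `&` for
# `all` below).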
@def any(xs::Tuple) = _any(identity, xs...)
@def any(f::F, xs::Tuple) where {F} = _any(f, xs...)
@inline _any(f) = false
@inline function _any(f::F, x, xs...) where {F}
y = f(x)
y === true && return true
return y | _any(f, xs...)
end
@inline all(f, itr) = Base.all(f, itr)
@inline all(itr) = Base.all(itr)
@def all(xs::Tuple) = _all(identity, xs...)
@def all(f::F, xs::Tuple) where {F} = _all(f, xs...)
@inline _all(f) = true
@inline function _all(f::F, x, xs...) where {F}
y = f(x)
y === false && return false
return y & _all(f, xs...)
end
@inline findfirst(xs) = Base.findfirst(xs)
@inline findfirst(f, xs) = Base.findfirst(f, xs)
@def findfirst(xs::Tuple{Vararg{Bool}}) = _findfirst(identity, 1, xs...)
@def findfirst(f::F, xs::Tuple) where {F} = _findfirst(f, 1, xs...)
@inline _findfirst(f, n) = nothing
@inline _findfirst(f::F, n, x, xs...) where {F} = f(x) ? n : _findfirst(f, n + 1, xs...)
@inline findlast(xs) = Base.findlast(xs)
@inline findlast(f, xs) = Base.findlast(f, xs)
@def findlast(xs::Tuple{Vararg{Bool}}) = _findlast(identity, 1, xs...)
@def findlast(f::F, xs::Tuple) where {F} = _findlast(f, 1, xs...)
@inline _findlast(f, n) = nothing
@inline function _findlast(f::F, n, x, xs...) where {F}
i = _findlast(f, n + 1, xs...)
i === nothing || return i
return f(x) ? n : nothing
end
@inline findall(xs) = Base.findall(xs)
@inline findall(f, xs) = Base.findall(f, xs)
@def findall(xs::Tuple{Vararg{Bool}}) = _findall(identity, 1, xs...)
@def findall(f::F, xs::Tuple) where {F} = _findall(f, 1, xs...)
@inline _findall(f, n) = ()
@inline _findall(f, n, x, xs...) = ((f(x) ? (n,) : ())..., _findall(f, n + 1, xs...)...)
@inline findmax(args...; kw...) = Base.findmax(args...; kw...)
@def findmax(xs::Tuple{}) = throw(ArgumentError("collection must be non-empty"))
@def findmax(xs::Tuple) = _findmax(zip(xs, ntuple(identity, length(xs)))...)
@inline _findmax(ans) = ans
@inline function _findmax((x, i), (y, j), rest...)
x != x && return (x, i)
return _findmax(y != y || isless(x, y) ? (y, j) : (x, i), rest...)
end
@inline findmin(args...; kw...) = Base.findmin(args...; kw...)
@def findmin(xs::Tuple{}) = throw(ArgumentError("collection must be non-empty"))
@def findmin(xs::Tuple) = _findmin(zip(xs, ntuple(identity, length(xs)))...)
@inline _findmin(ans) = ans
@inline function _findmin((x, i), (y, j), rest...)
x != x && return (x, i)
return _findmin(y != y || isless(y, x) ? (y, j) : (x, i), rest...)
end
@inline argmax(args...; kw...) = Base.argmax(args...; kw...)
@def argmax(xs::Tuple) = Specialized.findmax(xs)[2]
@inline argmin(args...; kw...) = Base.argmin(args...; kw...)
@def argmin(xs::Tuple) = Specialized.findmin(xs)[2]
@inline maximum(args...; kw...) = Base.maximum(args...; kw...)
@def maximum(xs::Tuple) = Specialized.findmax(xs)[1]
@inline minimum(args...; kw...) = Base.minimum(args...; kw...)
@def minimum(xs::Tuple) = Specialized.findmin(xs)[1]
@inline extrema(args...; kw...) = Base.extrema(args...; kw...)
@def extrema(xs::Tuple{}) = throw(ArgumentError("collection must be non-empty"))
@def extrema(xs::Tuple) = Specialized.extrema(identity, xs)
@def extrema(f, xs::Tuple{}) = throw(ArgumentError("collection must be non-empty"))
@def function extrema(f::F, xs::Tuple) where {F}
    y = f(xs[1])
    y != y && return (y, y)  # NaN short-circuits, as in `Base.extrema`
    return _extrema(f, (y, y), Base.tail(xs)...)
end
@inline _extrema(f, ans) = ans
@inline function _extrema(f::F, (l, g), x, xs...) where {F}
    y = f(x)
    y != y && return (y, y)
    return _extrema(f, (isless(y, l) ? y : l, isless(g, y) ? y : g), xs...)
end
@inline in(x, xs) = Base.in(x, xs)
@inline in(x) = Base.Fix2(in, x)
@inline Specialized.in(x) = Base.Fix2(Specialized.in, x)
@def in(x, xs::Tuple) = _any(==(x), xs...)
@inline unique(f, itr) = Base.unique(f, itr)
@inline unique(itr) = Base.unique(itr)
@def unique(xs::Tuple{}) = ()
@def unique(xs::Tuple) =
afoldl((), xs...) do xs′, x
x in xs′ ? xs′ : (xs′..., x)
end
@def unique(f, xs::Tuple{}) = ()
@def function unique(f::F, xs::Tuple) where {F}
ys = map(f, xs)
xs′, = afoldl(((), ()), zip(xs, ys)...) do (xs, ys), (x, y)
        y in ys ? (xs, ys) : ((xs..., x), (ys..., y))
end
return xs′
end
@inline union(args...) = Base.union(args...)
@def union(a::Tuple) = Specialized.unique(a)
@def union(a::Tuple, b::Tuple) = unique((a..., b...))
@def union(a::Tuple, bs::Tuple...) = afoldl(union, a, bs...)
@inline intersect(s, itrs...) = Base.intersect(s, itrs...)
@def intersect(a::Tuple) = a
@def intersect(a::Tuple{}, b::Tuple{}) = ()
@def intersect(a::Tuple, b::Tuple{}) = ()
@def intersect(a::Tuple{}, b::Tuple) = ()
@def intersect(a::Tuple, b::Tuple) = unique(filter(x -> x in b, a))
@def intersect(a::Tuple, b::Tuple, cs::Tuple...) =
foldl(Specialized.intersect, cs, init = Specialized.intersect(a, b))
@inline setdiff(args...) = Base.setdiff(args...)
@def setdiff(a::Tuple, b::Tuple{}) = a
@def setdiff(a::Tuple, b::Tuple) = setdiff(_exclude(a, b[1]), Base.tail(b))
@inline _exclude(a, b) = Base.foldl((ys, x) -> x == b ? ys : (ys..., x), a; init = ())
@def setdiff(a::Tuple, b::Tuple, cs::Tuple...) =
Base.foldl(Specialized.setdiff, cs, init = Specialized.setdiff(a, b))
@def symdiff(a::Tuple, b::Tuple) =
    foldl(b; init = a) do a, x
        i = findfirst(==(x), a)
        i === nothing ? (a..., x) : _deleteat(a, i)
    end
@def symdiff(a::Tuple, b::Tuple, cs::Tuple...) =
    foldl(Specialized.symdiff, cs, init = Specialized.symdiff(a, b))
# Drop the element at position `i`, carrying an index counter through the fold.
_deleteat(a, i::Int) =
    first(foldl(a; init = ((), 1)) do (ys, j), x
        ((i == j ? ys : (ys..., x)), j + 1)
    end)
@inline issubset(a, b) = Base.issubset(a, b)
@def issubset(a::Tuple, b::Tuple) = Specialized.all(x -> x in b, a)
if VERSION >= v"1.5"
@inline isdisjoint(a, b) = Base.isdisjoint(a, b)
else
@inline isdisjoint(a, b) = isempty(intersect(a, b))
end
@def isdisjoint(a::Tuple, b::Tuple) = !Specialized.any(x -> x in b, a)
sort(v; kw...) = Base.sort(v; kw...)
@def sort(
v::Tuple;
lt = isless,
by = identity,
rev::Union{Bool,Nothing} = nothing,
order = Base.Forward,
) = _sort(Base.ord(lt, by, rev, order), v)
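# Tuple sort: a compile-time unrolled merge sort. Tuples of length <= 2 are
# handled directly; longer tuples are halved, sorted recursively, and merged.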
@inline _sort(order, ::Tuple{}) = ()
@inline _sort(order, x::Tuple{Any}) = x
@inline _sort(order, (x, y)::Tuple{Any,Any}) = Base.lt(order, y, x) ? (y, x) : (x, y)
@inline function _sort(order, v)
left, right = _halve(v)
return _mergesorted(order, _sort(order, left), _sort(order, right))
end
@inline _mergesorted(order, ::Tuple{}, ::Tuple{}) = ()
@inline _mergesorted(order, ::Tuple{}, right) = right
@inline _mergesorted(order, left, ::Tuple{}) = left
@inline function _mergesorted(order, left, right)
a = left[1]
b = right[1]
if Base.lt(order, b, a)
return (b, _mergesorted(order, left, Base.tail(right))...)
else
return (a, _mergesorted(order, Base.tail(left), right)...)
end
end
@inline function _halve(v::NTuple{N,Any}) where {N}
m = N ÷ 2
return (getindex(v, 1:m), getindex(v, m+1:N))
end
# Compilation takes too long (> 10 sec) for `length(v::Tuple) > 13`.
const Any14{N} =
Tuple{Any,Any,Any,Any,Any,Any,Any,Any,Any,Any,Any,Any,Any,Any,Vararg{Any,N}}
@nospecialize
sort(v::Any14; kw...) = Tuple(Base.sort!(collect(v); kw...))
@specialize
| Baselet | https://github.com/tkf/Baselet.jl.git |
["MIT"] | 0.1.1 | aebf55e6d7795e02ca500a689d326ac979aaf89e | code | 724 | """
Baselet.Specialized
Functions that are specialized in Baselet.jl.
"""
baremodule Specialized
function getindex end
function filter end
function zip end
function enumerate end
function accumulate end
function cumsum end
function cumprod end
function flatten end
function foreach end
function any end
function all end
function findfirst end
function findlast end
function findall end
function findmax end
function findmin end
function argmax end
function argmin end
function maximum end
function minimum end
function extrema end
function in end
function unique end
function union end
function intersect end
function setdiff end
function symdiff end
function issubset end
function isdisjoint end
function sort end
end
| Baselet | https://github.com/tkf/Baselet.jl.git |
["MIT"] | 0.1.1 | aebf55e6d7795e02ca500a689d326ac979aaf89e | code | 1653 | module Utils
using Base.Meta: isexpr
using ..Specialized
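# Split a method signature `f(args...) where {...}` into the `Specialized.f`
# definition (lhs) and the forwarding call (rhs), rewriting keyword defaults
# `k = v` into pass-throughs `k = k`.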
function spec_lhs_rhs(ex)
if isexpr(ex, :where)
lhs, rhs = spec_lhs_rhs(ex.args[1])
return Expr(:where, lhs, ex.args[2:end]...), rhs
elseif isexpr(ex, :call)
f = ex.args[1]
@assert f isa Symbol
spec = Expr(:call, :($Specialized.$f), ex.args[2:end]...)
@debug "@def -> spec_lhs_rhs" ex spec
rhs = strip_typeassert(spec)
if isexpr(get(rhs.args, 2, nothing), :parameters)
rhs.args[2].args .= map(rhs.args[2].args) do x
if isexpr(x, :kw)
Expr(:kw, x.args[1], x.args[1])
else
x
end
end
end
return (spec, rhs)
else
error("Cannot handle expression: ", ex)
end
end
function strip_typeassert(ex)
if isexpr(ex, :(::))
length(ex.args) == 2 ||
error("argument name is required for `@def`. Got:\n", ex)
return ex.args[1]
elseif ex isa Expr
return Expr(ex.head, map(strip_typeassert, ex.args)...)
else
return ex
end
end
"""
@def f(...) = ...
@def function f(...) ... end
Expand function definition for function `f` to:
```julia
@inline \$f(...) = Specialized.\$f(...)
@inline Specialized.\$f(...) = ...
```
"""
macro def(ex)
@assert ex.head in [:function, :(=)]
@assert length(ex.args) == 2
lhs, rhs = spec_lhs_rhs(ex.args[1])
block = Expr(:block, __source__, rhs)
quote
@inline $(ex.args[1]) = $block
@inline $lhs = $(ex.args[2])
end |> esc
end
end # module
using .Utils: @def
| Baselet | https://github.com/tkf/Baselet.jl.git |
["MIT"] | 0.1.1 | aebf55e6d7795e02ca500a689d326ac979aaf89e | code | 595 | module TestBaselet
using Test
@testset "$file" for file in sort([
file for file in readdir(@__DIR__) if match(r"^test_.*\.jl$", file) !== nothing
])
# Skip inference test on Julia 1.0. It may work in 1.1 or 1.2 but
    # they are not tested.
if VERSION < v"1.3-" && file == "test_inference.jl"
@info "Skip $file for Julia $VERSION"
continue
elseif (
lowercase(get(ENV, "JULIA_PKGEVAL", "false")) == "true" &&
file == "test_inference.jl"
)
@info "Skip $file on PkgEval."
continue
end
include(file)
end
end # module
| Baselet | https://github.com/tkf/Baselet.jl.git |
["MIT"] | 0.1.1 | aebf55e6d7795e02ca500a689d326ac979aaf89e | code | 197 | module TestAqua
import Aqua
import Baselet
using Test
Aqua.test_all(Baselet)
@testset "Compare Project.toml and test/Project.toml" begin
Aqua.test_project_extras(Baselet)
end
end # module
| Baselet | https://github.com/tkf/Baselet.jl.git |
["MIT"] | 0.1.1 | aebf55e6d7795e02ca500a689d326ac979aaf89e | code | 1342 | module TestInference
using Baselet
using Test
valof(::Val{x}) where {x} = x
macro test_inferred(ex)
quote
f() = $(esc(ex))
Test.@test Test.@inferred(f()) isa Any
end
end
@testset "getindex" begin
@test_inferred Baselet.getindex((Val(1), Val(2), Val(3), Val(4)), 2:3)
end
@testset "zip" begin
@test_inferred Baselet.zip((Val(1), Val(2), Val(3), Val(4)))
@test_inferred Baselet.zip(
(Val(1), Val(2), Val(3), Val(4)),
(Val(5), Val(6), Val(7), Val(8)),
)
end
@testset "flatten" begin
@test_inferred Baselet.flatten(((Val(1), Val(2)), (Val(3), Val(4), Val(5))))
end
@testset "enumerate" begin
if VERSION >= v"1.6-"
@info "Skip inference test for `enumerate` on Julia $VERSION"
else
@test_inferred Val(Baselet.enumerate((Val(1), Val(2), Val(3), Val(4))))
end
end
@testset "any" begin
@test_inferred Val(Baselet.any(valof, (Val(false), Val(false), Val(false))))
@test_inferred Val(Baselet.any(valof, (Val(false), Val(false), Val(true))))
end
@testset "all" begin
@test_inferred Val(Baselet.all(valof, (Val(true), Val(true), Val(true))))
@test_inferred Val(Baselet.all(valof, (Val(true), Val(true), Val(false))))
end
@testset "sort" begin
@test_inferred Baselet.sort((Val(3), Val(1), Val(2), Val(0)), by = valof)
end
end # module
| Baselet | https://github.com/tkf/Baselet.jl.git |
["MIT"] | 0.1.1 | aebf55e6d7795e02ca500a689d326ac979aaf89e | code | 482 | module TestSort
using Baselet.Specialized: sort
using Random: shuffle
using Test
@testset for n in 1:10
one_to_n = ntuple(identity, n)
@test sort(one_to_n) == one_to_n
@test sort(one_to_n; by = first) == one_to_n
@test sort(one_to_n; by = _ -> 1) == one_to_n # stable sort
@test sort(Tuple(shuffle(1:n))) == one_to_n
@test sort(one_to_n; by = inv) == reverse(one_to_n)
@test sort(Tuple(shuffle(1:n)); by = inv) == reverse(one_to_n)
end
end # module
| Baselet | https://github.com/tkf/Baselet.jl.git |
["MIT"] | 0.1.1 | aebf55e6d7795e02ca500a689d326ac979aaf89e | code | 4761 | module TestWithBase
using Base.Iterators: flatten
using Baselet
using Test
args_and_kwargs(args...; kwargs...) = args, (; kwargs...)
==ᶜ(x, y) = collect(x) == collect(y)
# Dummy implementations
accumulate(op, xs::Tuple; kw...) = Tuple(Base.accumulate(op, collect(xs); kw...))
cumsum(xs::Tuple) = Tuple(Base.cumsum(collect(xs)))
cumprod(xs::Tuple) = Tuple(Base.cumprod(collect(xs)))
raw_testdata_pure = """
getindex((100, 200, 300, 400), 2:3) ==
zip((11, 22, 33)) ==ᶜ
zip((11, 22, 33), (111, 222, 333)) ==ᶜ
accumulate(+, (11, 22, 33)) ===
accumulate(+, (11, 22, 33); init=100) ===
cumsum((11, 22, 33)) ===
cumprod((11, 22, 33)) ===
enumerate((11, 22, 33)) ==ᶜ
flatten(((11, 22), (33, 44, 55))) ==ᶜ
any((false, false, false)) ===
any((false, true, false)) ===
any((false, true, false, missing)) ===
any((false, false, false, missing)) ===
any(==(0), (10, 11, 12)) ===
any(==(10), (10, 11, 12)) ===
any(==(0), (10, 11, 12, missing)) ===
any(==(10), (10, 11, 12, missing)) ===
all((true, true, true)) ===
all((true, false, true)) ===
all((true, true, true, missing)) ===
all((true, false, true, missing)) ===
all(==(0), (10, 10, 10)) ===
all(==(10), (10, 10, 10)) ===
all(==(0), (10, 10, 10, missing)) ===
all(==(10), (10, 10, 10, missing)) ===
findfirst(()) ==
findfirst((false, true)) ==
findfirst((false, false)) ==
findfirst(==(10), ()) ==
findfirst(==(10), (11, 10, 10)) ==
findfirst(==(10), (11, 11, 11)) ==
findlast(()) ==
findlast((false, true)) ==
findlast((false, false)) ==
findlast(==(10), ()) ==
findlast(==(10), (11, 10, 10)) ==
findlast(==(10), (11, 11, 11)) ==
findall(()) ==ᶜ
findall((false, true)) ==ᶜ
findall((false, false)) ==ᶜ
findall(==(10), ()) ==ᶜ
findall(==(10), (11, 10, 10)) ==ᶜ
findall(==(10), (11, 11, 11)) ==ᶜ
findmax((1, 2, 3, 2, 3, 1)) ===
findmax((1, NaN, 2, 3, NaN, 2)) ===
findmin((1, 2, 3, 2, 3, 1)) ===
findmin((1, NaN, 2, 3, NaN, 2)) ===
argmax((1, 2, 3, 2, 3, 1)) ===
argmax((1, NaN, 2, 3, NaN, 2)) ===
argmin((1, 2, 3, 2, 3, 1)) ===
argmin((1, NaN, 2, 3, NaN, 2)) ===
maximum((1, 2, 3, 2, 3, 1)) ===
maximum((1, NaN, 2, 3, NaN, 2)) ===
minimum((1, 2, 3, 2, 3, 1)) ===
minimum((1, NaN, 2, 3, NaN, 2)) ===
extrema((1, 2, 3, 2, 3, 1)) ===
extrema((1, NaN, 2, 3, NaN, 2)) ===
in(1, (1, 2, 3)) ===
in(0, (1, 2, 3)) ===
in(0, (1, 2, 3, missing)) ===
in(1, (1, 2, 3, missing)) ===
in(missing, (missing,)) ===
in(NaN, (NaN,)) ===
in(0.0, (-0.0,)) ===
unique((11, 12, 13)) ==ᶜ
unique((11, 11, 11)) ==ᶜ
unique(abs, (11, -12, 13, 12)) ==ᶜ
unique(abs, (11, -11, 11)) ==ᶜ
union((11, 12, 13)) ==ᶜ
union((11, 12, 13), (12, 13)) ==ᶜ
union((11, 12, 13), (12, 13, 14), (15, 13, 12)) ==ᶜ
intersect((11, 12, 13)) ==ᶜ
intersect((11, 12, 13), (12, 13)) ==ᶜ
intersect((11, 12, 13), (12, 13, 14), (15, 13, 12)) ==ᶜ
setdiff((11, 12, 13), ()) ==ᶜ
setdiff((), (11, 12, 13)) ==ᶜ
setdiff((11, 12, 13), (12, 13)) ==ᶜ
setdiff((11, 12, 13), (12, 13, 14), (15, 13, 12)) ==ᶜ
"""
# An array of `(label, (f, args, kwargs, comparison))`
testdata_pure = map(split(raw_testdata_pure, "\n", keepempty = false)) do x
f, rest = split(x, "(", limit = 2)
input, comparison = rsplit(rest, ")", limit = 2)
comparison = strip(comparison)
ex = Meta.parse("DUMMY($input)")
ex.args[1] = args_and_kwargs
@eval ($x, ($(Symbol(f)), $ex..., $(Symbol(comparison))))
end
@testset "$label" for (label, (f, args, kwargs, ==′)) in testdata_pure
Baselet_f = getproperty(Baselet, nameof(f))
Specialized_f = getproperty(Baselet.Specialized, nameof(f))
@test Baselet_f(args...; kwargs...) ==′ f(args...; kwargs...)
@test Specialized_f(args...; kwargs...) ==′ f(args...; kwargs...)
@test typeof(Baselet_f(args...; kwargs...)) == typeof(Specialized_f(args...; kwargs...))
end
function test_all_implementations(test)
@testset for m in [Base, Baselet, Baselet.Specialized]
test(m)
end
end
@testset "foreach(x -> push!(xs, x), 1:5)" begin
test_all_implementations() do m
xs = Int[]
m.foreach(1:5) do x
push!(xs, x)
end
@test xs == 1:5
end
end
@testset "foreach(x -> push!(xs, x), (1, 2, 3))" begin
test_all_implementations() do m
xs = Int[]
m.foreach((1, 2, 3)) do x
push!(xs, x)
end
@test xs == 1:3
end
end
@testset "foreach((a, b) -> push!(xs, a + b), 1:5, 6:10)" begin
test_all_implementations() do m
xs = Int[]
m.foreach(1:5, 6:10) do a, b
push!(xs, a + b)
end
@test xs == 7:2:15
end
end
@testset "foreach((a, b) -> push!(xs, a + b), (1, 2, 3), (6, 7, 8))" begin
test_all_implementations() do m
xs = Int[]
m.foreach((1, 2, 3), (6, 7, 8)) do a, b
push!(xs, a + b)
end
@test xs == 7:2:11
end
end
end # module
| Baselet | https://github.com/tkf/Baselet.jl.git |
["MIT"] | 0.1.1 | aebf55e6d7795e02ca500a689d326ac979aaf89e | docs | 1328 | # Baselet: `Base` API optimized for tuples
[](https://github.com/tkf/Baselet.jl/actions?query=workflow%3A%22Run+tests%22)
[](https://github.com/tkf/Aqua.jl)
## API
* `Baselet.$f` provides a possibly-optimized version of `$f` exported
from `Base` (e.g., `Baselet.sort(::Tuple)`).
* `Baselet.Specialized.$f` provides a function `$f` with a subset of
API from `Base.$f` that is _guaranteed_ to have optimized
specializations (e.g., `Baselet.Specialized.sort(::Tuple)`).
* `Baselet.$f` falls back to `Base.$f` if the associated
`Baselet.Specialized.$f` is not found. For example,
`Baselet.sort(::Vector)` just calls `Base.sort(::Vector)`.
The list of supported functions can be found by typing
`Baselet.Specialized.` + <kbd>TAB</kbd> in the REPL:
```julia
julia> using Baselet
julia> Baselet.Specialized.
accumulate cumprod findall flatten isdisjoint sort
all cumsum findfirst foreach issubset symdiff
any enumerate findlast getindex maximum union
argmax extrema findmax in minimum unique
argmin filter findmin intersect setdiff zip
```
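
A minimal usage sketch (REPL session; the tuple methods return tuples, and
keyword arguments such as `by` are forwarded to the specialized method):

```julia
julia> using Baselet

julia> Baselet.sort((3, 1, 2))        # specialized tuple method
(1, 2, 3)

julia> Baselet.Specialized.sort((3, 1, 2); by = x -> -x)
(3, 2, 1)
```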
| Baselet | https://github.com/tkf/Baselet.jl.git |
["Apache-2.0"] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2671 | using Revise
using Random
using CSV
using DataFrames
using StatsBase
using Statistics: mean, std
using CUDA
using EvoTrees
using Solage: Connectors
using AWS: AWSCredentials, AWSConfig, @service
@service S3
aws_creds = AWSCredentials(ENV["AWS_ACCESS_KEY_ID_JDB"], ENV["AWS_SECRET_ACCESS_KEY_JDB"])
aws_config = AWSConfig(; creds=aws_creds, region="ca-central-1")
bucket = "jeremiedb"
path = "share/data/higgs/HIGGS.arrow"
df_tot = Connectors.read_arrow_aws(path; bucket="jeremiedb", aws_config)
rename!(df_tot, "Column1" => "y")
feature_names = setdiff(names(df_tot), ["y"])
target_name = "y"
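# rank-transform each feature to its empirical percentile in (0, 1)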
function percent_rank(x::AbstractVector{T}) where {T}
return tiedrank(x) / (length(x) + 1)
end
transform!(df_tot, feature_names .=> percent_rank .=> feature_names)
dtrain = df_tot[1:end-500_000, :];
deval = df_tot[end-500_000+1:end, :];
dtest = df_tot[end-500_000+1:end, :];
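# note: deval and dtest here are the same 500k-row holdout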
config = EvoTreeRegressor(
loss=:logloss,
nrounds=5000,
eta=0.2,
nbins=224,
max_depth=11,
L2=1,
lambda=0.0,
gamma=0.0,
rowsample=0.8,
colsample=0.8,
min_weight=1,
rng=123,
)
device = "cpu"
metric = "logloss"
@time m_evo = fit_evotree(config, dtrain; target_name, fnames=feature_names, deval, metric, device, early_stopping_rounds=200, print_every_n=100);
p_test = m_evo(dtest);
logloss_test = mean(-dtest.y .* log.(p_test) .+ (dtest.y .- 1) .* log.(1 .- p_test))
@info "LogLoss - dtest" logloss_test
error_test = 1 - mean(round.(Int, p_test) .== dtest.y)
@info "ERROR - dtest" error_test
# ┌ Info: LogLoss - dtest
# └ logloss_test = 0.4716574579097044
# ┌ Info: ERROR - dtest
# └ error_test = 0.229522
@info "XGBoost"
@info "train"
using XGBoost
params_xgb = Dict(
:num_round => 2000,
:max_depth => 10,
:eta => 0.15,
:objective => "reg:logistic",
:print_every_n => 5,
:gamma => 0,
:lambda => 1,
:subsample => 0.8,
:colsample_bytree => 0.8,
:tree_method => "gpu_hist", # hist/gpu_hist
:max_bin => 128,
)
dtrain_xgb = DMatrix(select(dtrain, feature_names), dtrain.y)
watchlist = Dict("eval" => DMatrix(select(deval, feature_names), deval.y));
@time m_xgb = xgboost(dtrain_xgb; watchlist, nthread=Threads.nthreads(), verbosity=0, eval_metric="logloss", params_xgb...);
pred_xgb = XGBoost.predict(m_xgb, DMatrix(select(deval, feature_names)));
logloss_test = mean(-dtest.y .* log.(pred_xgb) .+ (dtest.y .- 1) .* log.(1 .- pred_xgb))
@info "LogLoss - dtest" logloss_test
error_test = 1 - mean(round.(Int, pred_xgb) .== dtest.y)
@info "ERROR - xgb test" error_test
# ┌ Info: LogLoss - dtest
# └ logloss_test = 0.4710665675338929
# ┌ Info: ERROR - xgb test
# └ error_test = 0.22987999999999997
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
["Apache-2.0"] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2710 | using Revise
using Statistics
using StatsBase: sample
# using XGBoost
using EvoTrees
using BenchmarkTools
using Random: seed!
import CUDA
nobs = Int(1e6)
num_feat = Int(100)
nrounds = 200
T = Float64
nthread = Base.Threads.nthreads()
@info "testing with: $nobs observations | $num_feat features. nthread: $nthread"
seed!(123)
x_train = rand(T, nobs, num_feat)
y_train = rand(T, size(x_train, 1))
@info "Gaussian MLE"
params_evo = EvoTreeMLE(
loss=:gaussian,
nrounds=200,
lambda=0.0,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=100.0,
rowsample=0.5,
colsample=0.5,
nbins=64,
)
@info "evotrees train CPU:"
device = "cpu"
@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:gaussian, device, print_every_n=100);
@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:gaussian, device, print_every_n=100);
# @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=:gaussian);
@info "evotrees predict CPU:"
@time pred_evo = m_evo(x_train);
@btime m_evo($x_train);
CUDA.allowscalar(true)
@info "evotrees train GPU:"
device = "gpu"
# @time m_evo = fit_evotree(params_evo; x_train, y_train);
@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:gaussian, device, print_every_n=100);
@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:gaussian, device, print_every_n=100);
# @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=:gaussian);
@info "evotrees predict GPU:"
@time pred_evo = m_evo(x_train; device);
@btime m_evo($x_train; device);
################################
# Logistic
################################
@info "Logistic MLE"
params_evo = EvoTreeMLE(
loss=:logistic,
nrounds=nrounds,
lambda=0.0,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=100.0,
rowsample=0.5,
colsample=0.5,
nbins=64,
)
@info "testing with: $nobs observations | $num_feat features."
x_train = rand(nobs, num_feat)
y_train = rand(size(x_train, 1))
@info "evotrees train CPU:"
device = "cpu"
@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:logistic_mle, print_every_n=100);
@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:logistic_mle, print_every_n=100);
# @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=:logistic_mle);
@info "evotrees predict CPU:"
@time pred_evo = m_evo(x_train);
@btime m_evo($x_train);
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
["Apache-2.0"] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2112 | using Revise
using CSV
using DataFrames
using EvoTrees
using StatsBase: sample, tiedrank
using Statistics
using Random: seed!
using AWS: AWSCredentials, AWSConfig, @service
@service S3
aws_creds = AWSCredentials(ENV["AWS_ACCESS_KEY_ID_JDB"], ENV["AWS_SECRET_ACCESS_KEY_JDB"])
aws_config = AWSConfig(; creds = aws_creds, region = "ca-central-1")
path = "share/data/year/year.csv"
raw = S3.get_object(
"jeremiedb",
path,
Dict("response-content-type" => "application/octet-stream");
aws_config,
)
df = DataFrame(CSV.File(raw, header = false))
path = "share/data/year/year-train-idx.txt"
raw = S3.get_object(
"jeremiedb",
path,
Dict("response-content-type" => "application/octet-stream");
aws_config,
)
train_idx = DataFrame(CSV.File(raw, header = false))[:, 1] .+ 1
path = "share/data/year/year-eval-idx.txt"
raw = S3.get_object(
"jeremiedb",
path,
Dict("response-content-type" => "application/octet-stream");
aws_config,
)
eval_idx = DataFrame(CSV.File(raw, header = false))[:, 1] .+ 1
X = df[:, 2:end]
Y_raw = Float64.(df[:, 1])
Y = (Y_raw .- mean(Y_raw)) ./ std(Y_raw)
function percent_rank(x::AbstractVector{T}) where {T}
return tiedrank(x) / (length(x) + 1)
end
transform!(X, names(X) .=> percent_rank .=> names(X))
X = collect(Matrix{Float32}(X))
Y = Float32.(Y)
x_tot, y_tot = X[1:(end-51630), :], Y[1:(end-51630)]
x_test, y_test = X[(end-51630+1):end, :], Y[(end-51630+1):end]
x_train, x_eval = x_tot[train_idx, :], x_tot[eval_idx, :]
y_train, y_eval = y_tot[train_idx], y_tot[eval_idx]
config = EvoTreeRegressor(
T = Float32,
nrounds = 1200,
loss = :linear,
eta = 0.1,
nbins = 128,
min_weight = 4,
max_depth = 7,
lambda = 0,
gamma = 0,
rowsample = 0.8,
colsample=0.8,
)
# @time m = fit_evotree(config; x_train, y_train, print_every_n=25);
@time m, logger = fit_evotree(
config;
x_train,
y_train,
x_eval,
y_eval,
early_stopping_rounds = 100,
print_every_n = 10,
metric = :mse,
return_logger = true,
);
p_evo = m(x_test);
mean((p_evo .- y_test) .^ 2) * std(Y_raw)^2
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
["Apache-2.0"] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 6736 | using Revise
using CSV
using DataFrames
using EvoTrees
using StatsBase: sample, tiedrank
using Statistics
using Random: seed!
using ReadLIBSVM
# using GLMakie
# data is C14 - Yahoo! Learning to Rank Challenge
# data can be obtained though a request to https://webscope.sandbox.yahoo.com/
using AWS: AWSCredentials, AWSConfig, @service
@service S3
aws_creds = AWSCredentials(ENV["AWS_ACCESS_KEY_ID_JDB"], ENV["AWS_SECRET_ACCESS_KEY_JDB"])
aws_config = AWSConfig(; creds=aws_creds, region="ca-central-1")
function read_libsvm_aws(file::String; has_query=false, aws_config=AWSConfig())
raw = S3.get_object("jeremiedb", file, Dict("response-content-type" => "application/octet-stream"); aws_config)
return read_libsvm(raw; has_query)
end
function ndcg(p, y, k=10)
    k = min(k, length(p))
    discounts = log2.((1:k) .+ 1)
    # DCG of the top-k items as ranked by the predictions
    p_order = partialsortperm(p, 1:k, rev=true)
    dcg = sum((2 .^ y[p_order] .- 1) ./ discounts)
    # ideal DCG: top-k items under the true relevance ordering
    y_order = partialsortperm(y, 1:k, rev=true)
    idcg = sum((2 .^ y[y_order] .- 1) ./ discounts)
    return idcg == 0 ? 1.0 : dcg / idcg
end
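# quick sanity check of the NDCG helper on a toy ranking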
p = [6, 5, 4, 3, 2, 1, 0, -1] .+ 100
y = [3, 2, 3, 0, 1, 2, 3, 2]
ndcg(p, y, 6)
@time dtrain = read_libsvm_aws("share/data/yahoo-ltrc/set1.train.txt"; has_query=true, aws_config)
@time deval = read_libsvm_aws("share/data/yahoo-ltrc/set1.valid.txt"; has_query=true, aws_config)
@time dtest = read_libsvm_aws("share/data/yahoo-ltrc/set1.test.txt"; has_query=true, aws_config)
colsums_train = map(sum, eachcol(dtrain[:x]))
# colsums_eval = map(sum, eachcol(deval[:x]))
colsums_test = map(sum, eachcol(dtest[:x]))
sum(colsums_train .== 0)
sum(colsums_test .== 0)
@assert all((colsums_train .== 0) .== (colsums_test .== 0))
drop_cols = colsums_train .== 0
x_train = dtrain[:x][:, .!drop_cols]
x_eval = deval[:x][:, .!drop_cols]
x_test = dtest[:x][:, .!drop_cols]
# x_train_miss = x_train .== 0
# x_eval_miss = x_eval .== 0
# x_test_miss = x_test .== 0
# x_train[x_train.==0] .= 0.5
# x_eval[x_eval.==0] .= 0.5
# x_test[x_test.==0] .= 0.5
# x_train = hcat(x_train, x_train_miss)
# x_eval = hcat(x_eval, x_eval_miss)
# x_test = hcat(x_test, x_test_miss)
q_train = dtrain[:q]
q_eval = deval[:q]
q_test = dtest[:q]
y_train = dtrain[:y];
y_eval = deval[:y];
y_test = dtest[:y];
#####################################
# mse regression
#####################################
y_train = dtrain[:y]
y_eval = deval[:y]
y_test = dtest[:y]
config = EvoTreeRegressor(
nrounds=6000,
loss=:mse,
eta=0.02,
nbins=64,
max_depth=11,
rowsample=0.9,
colsample=0.9,
)
# @time m = fit_evotree(config; x_train, y_train, print_every_n=25);
@time m_mse, logger_mse = fit_evotree(
config;
x_train=x_train,
y_train=y_train,
x_eval=x_eval,
y_eval=y_eval,
early_stopping_rounds=200,
print_every_n=50,
metric=:mse,
return_logger=true
);
p_test = m_mse(x_test);
test_df = DataFrame(p=p_test, y=y_test, q=q_test)
test_df_agg = combine(groupby(test_df, "q"), ["p", "y"] => ndcg => "ndcg")
ndcg_test = round(mean(test_df_agg.ndcg), sigdigits=5)
@info "MSE - test data - MSE model" mean((p_test .- y_test) .^ 2)
@info "NDCG - test data - MSE model" ndcg_test
#####################################
# logistic regression
#####################################
max_rank = 4
y_train = dtrain[:y] ./ max_rank
y_eval = deval[:y] ./ max_rank
y_test = dtest[:y] ./ max_rank
config = EvoTreeRegressor(
nrounds=6000,
loss=:logloss,
eta=0.01,
nbins=64,
max_depth=11,
rowsample=0.9,
colsample=0.9,
)
@time m_logloss, logger_logloss = fit_evotree(
config;
x_train=x_train,
y_train=y_train,
x_eval=x_eval,
y_eval=y_eval,
early_stopping_rounds=200,
print_every_n=50,
metric=:logloss,
return_logger=true
);
# use the original y since NDCG is scale sensitive
y_train = dtrain[:y]
y_eval = deval[:y]
y_test = dtest[:y]
# p_eval = m(x_eval);
# eval_df = DataFrame(p = p_eval, y = y_eval, q = q_eval)
# eval_df_agg = combine(groupby(eval_df, "q"), ["p", "y"] => ndcg => "ndcg")
# ndcg_eval = mean(eval_df_agg.ndcg)
p_test = m_logloss(x_test);
test_df = DataFrame(p=p_test, y=y_test, q=q_test)
test_df_agg = combine(groupby(test_df, "q"), ["p", "y"] => ndcg => "ndcg")
ndcg_test = round(mean(test_df_agg.ndcg), sigdigits=5)
@info "NDCG - test data - LogLoss model" ndcg_test
#####################################
# logistic regression on DataFrame
#####################################
target_name = "y"
df_train = DataFrame(x_train, :auto)
df_train.y = dtrain[:y] ./ 4
df_train.q = dtrain[:q]
df_eval = DataFrame(x_eval, :auto)
df_eval.y = deval[:y] ./ 4
df_eval.q = deval[:q]
df_test = DataFrame(x_test, :auto)
df_test.y = dtest[:y] ./ 4
df_test.q = dtest[:q]
function rank_target_norm(y::AbstractVector)
out = similar(y)
if minimum(y) == maximum(y)
out .= 0.5
else
out .= (y .- minimum(y)) ./ (maximum(y) - minimum(y))
end
return out
end
function percent_rank(x::AbstractVector{T}) where {T}
return tiedrank(x) / (length(x) + 1)
end
feature_names_raw = setdiff(names(df_train), ["y", "q"])
feature_names_rel = feature_names_raw .* "_rel"
transform!(df_train, feature_names_raw .=> percent_rank .=> feature_names_rel)
transform!(df_eval, feature_names_raw .=> percent_rank .=> feature_names_rel)
transform!(df_test, feature_names_raw .=> percent_rank .=> feature_names_rel)
feature_names = setdiff(names(df_train), ["y", "q"])
# df_train = transform!(
# groupby(df_train, "q"),
# "y" => rank_target_norm => "y")
# df_eval = transform!(
# groupby(df_eval, "q"),
# "y" => rank_target_norm => "y")
# df_test = transform!(
# groupby(df_test, "q"),
# "y" => rank_target_norm => "y")
minimum(df_eval.y)
maximum(df_eval.y)
config = EvoTreeRegressor(
nrounds=6000,
loss=:logloss,
eta=0.01,
nbins=64,
max_depth=11,
rowsample=0.9,
colsample=0.9,
)
@time m_logloss_df, logger_logloss_df = fit_evotree(
config,
df_train;
target_name,
fnames=feature_names_raw,
deval=df_eval,
early_stopping_rounds=200,
print_every_n=50,
metric=:logloss,
return_logger=true
);
m_logloss_df.info
p_test_df = m_logloss_df(df_test);
# p_test_mat = m_logloss_df(x_test);
EvoTrees.importance(m_logloss_df)
p_test = m_logloss_df(df_test);
test_df = DataFrame(p=p_test, y=dtest[:y], q=dtest[:q])
test_df_agg = combine(groupby(test_df, "q"), ["p", "y"] => ndcg => "ndcg")
ndcg_test = mean(test_df_agg.ndcg)
# ndcg_test = 0.8022558972243291
# ndcg_test = 0.8020754563069513
@info "NDCG - test data - LogLoss DF model" ndcg_test
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
["Apache-2.0"] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2662 | using CSV
using DataFrames
using EvoTrees
using XGBoost
using StatsBase: sample
using AWS: AWSCredentials, AWSConfig, @service
@service S3
aws_creds = AWSCredentials(ENV["AWS_ACCESS_KEY_ID_JDB"], ENV["AWS_SECRET_ACCESS_KEY_JDB"])
aws_config = AWSConfig(; creds=aws_creds, region="ca-central-1")
path = "share/data/insurance-aicrowd.csv"
raw = S3.get_object("jeremiedb", path, Dict("response-content-type" => "application/octet-stream"); aws_config)
df = DataFrame(CSV.File(raw))
transform!(df, "claim_amount" => ByRow(x -> x > 0 ? 1.0f0 : 0.0f0) => "event")
target = "event"
feats = ["vh_age", "vh_value", "vh_speed", "vh_weight", "drv_age1",
"pol_no_claims_discount", "pol_coverage", "pol_duration", "pol_sit_duration"]
pol_cov_dict = Dict{String,Float64}(
"Min" => 1,
"Med1" => 2,
"Med2" => 3,
"Max" => 4)
pol_cov_map(x) = get(pol_cov_dict, x, 4)
transform!(df, "pol_coverage" => ByRow(pol_cov_map) => "pol_coverage")
setdiff(feats, names(df))
nobs = nrow(df)
id_train = sample(1:nobs, Int(round(0.8 * nobs)), replace=false)
df_train = dropmissing(df[id_train, [feats..., target]])
df_eval = dropmissing(df[Not(id_train), [feats..., target]])
x_train = Matrix{Float32}(df_train[:, feats])
x_eval = Matrix{Float32}(df_eval[:, feats])
y_train = Vector{Float32}(df_train[:, target])
y_eval = Vector{Float32}(df_eval[:, target])
config = EvoTreeRegressor(T=Float32,
loss=:logistic,
lambda=0.02,
gamma=0,
nbins=32,
max_depth=5,
rowsample=0.5,
colsample=0.8,
nrounds=400,
tree_type="oblivious",
eta=0.05)
# @time m = fit_evotree(config; x_train, y_train, print_every_n=25);
@time m = fit_evotree(config; x_train, y_train, x_eval, y_eval, early_stopping_rounds=50, print_every_n=25, metric=:logloss);
pred_eval_evo = m(x_eval) |> vec;
params_xgb = [
"objective" => "reg:logistic",
"booster" => "gbtree",
"eta" => 0.05,
"max_depth" => 4,
"lambda" => 10.0,
"gamma" => 0.0,
"subsample" => 0.5,
"colsample_bytree" => 0.8,
"tree_method" => "hist",
"max_bin" => 32,
"print_every_n" => 5]
# nthread = Threads.nthreads()
nthread = 8
nrounds = 400
metrics = ["logloss"]
@info "xgboost train:"
@time m_xgb = xgboost(x_train, nrounds, label=y_train, param=params_xgb, metrics=metrics, nthread=nthread, silent=1);
pred_eval_xgb = XGBoost.predict(m_xgb, x_eval)
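# binary cross-entropy (logloss), averaged over observations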
function logloss(p::Vector{T}, y::Vector{T}) where {T<:AbstractFloat}
eval = zero(T)
@inbounds for i in eachindex(y)
eval -= (y[i] * log(p[i]) + (1 - y[i]) * log(1 - p[i]))
end
eval /= length(p)
return eval
end
logloss(pred_eval_evo, y_eval)
logloss(pred_eval_xgb, y_eval)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
["Apache-2.0"] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2258 | using Statistics
using StatsBase:sample
using Revise
using EvoTrees
using MemoryConstrainedTreeBoosting
nrounds = 100
# EvoTrees params
params_evo = EvoTreeRegressor(T=Float32,
loss=:logistic, metric=:logloss,
nrounds=nrounds,
lambda=0.5, gamma=0.0, eta=0.05,
max_depth=6, min_weight=1.0,
rowsample=1.0, colsample=0.5, nbins=64)
# MemoryConstrainedTreeBoosting params
params_mctb = (
weights = nothing,
bin_count = 128,
iteration_count = nrounds,
min_data_weight_in_leaf = 1.0,
l2_regularization = 0.0,
max_leaves = 64,
max_depth = 6,
max_delta_score = 1.0e10, # Before shrinkage.
learning_rate = 0.05,
feature_fraction = 0.5, # Per tree.
bagging_temperature = 0.1,
)
nobs = Int(1e6)
num_feat = Int(100)
@info "testing with: $nobs observations | $num_feat features."
X = rand(Float32, nobs, num_feat)
Y = Float32.(rand(Bool, size(X, 1)))
@info "evotrees train CPU:"
params_evo.device = "cpu"
@time m, cache = EvoTrees.init_evotree(params_evo, X, Y);
@time EvoTrees.grow_evotree!(m, cache);
@time m, cache = EvoTrees.init_evotree(params_evo, X, Y);
@time EvoTrees.grow_evotree!(m, cache);
@time m_evo = fit_evotree(params_evo, X, Y);
@time fit_evotree(params_evo, X, Y);
@info "evotrees predict CPU:"
@time pred_evo = EvoTrees.predict(m_evo, X);
@time EvoTrees.predict(m_evo, X);
@info "evotrees train GPU:"
params_evo.device = "gpu"
@time m_evo = fit_evotree(params_evo, X, Y);
@time fit_evotree(params_evo, X, Y);
@info "evotrees predict GPU:"
@time pred_evo = EvoTrees.predict(m_evo, X);
@time EvoTrees.predict(m_evo, X);
@info "MemoryConstrainedTreeBoosting train CPU:"
@time bin_splits, trees = MemoryConstrainedTreeBoosting.train(X, Y; params_mctb...);
@time MemoryConstrainedTreeBoosting.train(X, Y; params_mctb...);
@info "MemoryConstrainedTreeBoosting predict CPU, JITed:"
save_path = tempname()
MemoryConstrainedTreeBoosting.save(save_path, bin_splits, trees)
unbinned_predict = MemoryConstrainedTreeBoosting.load_unbinned_predictor(save_path)
@time pred_mctb = unbinned_predict(X)
@time unbinned_predict(X)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
["Apache-2.0"] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 3309 | using Revise
using Statistics
using StatsBase: sample
using XGBoost
using EvoTrees
using DataFrames
using BenchmarkTools
using Random: seed!
# import CUDA
nobs = Int(1e6)
num_feat = Int(100)
nrounds = 200
T = Float64
nthread = Base.Threads.nthreads()
@info "testing with: $nobs observations | $num_feat features. nthread: $nthread"
seed!(123)
x_train = rand(T, nobs, num_feat)
y_train = rand(T, size(x_train, 1))
@info nthread
loss = "mse"
if loss == "mse"
loss_xgb = "reg:squarederror"
metric_xgb = "mae"
loss_evo = :mse
metric_evo = :mae
elseif loss == "logloss"
loss_xgb = "reg:logistic"
metric_xgb = "logloss"
loss_evo = :logloss
metric_evo = :logloss
end
@info "XGBoost"
params_xgb = Dict(
:num_round => nrounds,
:max_depth => 5,
:eta => 0.05,
:objective => loss_xgb,
:print_every_n => 5,
:subsample => 0.5,
:colsample_bytree => 0.5,
:tree_method => "hist",
:max_bin => 64,
)
dtrain = DMatrix(x_train, y_train)
watchlist = Dict("train" => DMatrix(x_train, y_train))
@time m_xgb = xgboost(dtrain; watchlist, nthread=nthread, verbosity=0, eval_metric=metric_xgb, params_xgb...);
# @btime m_xgb = xgboost($dtrain; watchlist, nthread=nthread, verbosity=0, eval_metric=metric_xgb, params_xgb...);
@info "xgboost predict:"
@time pred_xgb = XGBoost.predict(m_xgb, x_train);
# @btime XGBoost.predict($m_xgb, $x_train);
@info "EvoTrees"
dtrain = DataFrame(x_train, :auto)
dtrain.y .= y_train
target_name = "y"
verbosity = 0
params_evo = EvoTreeRegressor(
loss=loss_evo,
nrounds=nrounds,
alpha=0.5,
lambda=0.0,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=0.5,
nbins=64,
rng=123,
)
@info "EvoTrees CPU"
device = "cpu"
@info "init"
@time m_df, cache_df = EvoTrees.init(params_evo, dtrain; target_name);
@time m_df, cache_df = EvoTrees.init(params_evo, dtrain; target_name);
# @info "train - no eval"
# @time m_evo_df = fit_evotree(params_evo, dtrain; target_name, device, verbosity, print_every_n=100);
# @time m_evo_df = fit_evotree(params_evo, dtrain; target_name, device, verbosity, print_every_n=100);
@info "train - eval"
@time m_evo = fit_evotree(params_evo, dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
@time m_evo = fit_evotree(params_evo, dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
# @time m_evo = fit_evotree(params_evo, dtrain; target_name, device);
# @btime fit_evotree($params_evo, $dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
@info "predict"
@time pred_evo = m_evo(dtrain);
@btime m_evo($dtrain);
@info "EvoTrees GPU"
device = "gpu"
@info "train"
@time m_evo = fit_evotree(params_evo, dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
@time m_evo = fit_evotree(params_evo, dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
# @btime m_evo = fit_evotree($params_evo, $dtrain; target_name, device);
# @btime fit_evotree($params_evo, $dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
@info "predict"
@time pred_evo = m_evo(dtrain; device);
@btime m_evo($dtrain; device);
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
["Apache-2.0"] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 3131 | using Revise
using Statistics
using StatsBase: sample
using EvoTrees
using DataFrames
using BenchmarkTools
using Random: seed!
import CUDA
using MLJ
nobs = Int(2e6)
num_feat = Int(100)
nrounds = 200
T = Float64
nthread = Base.Threads.nthreads()
@info "testing with: $nobs observations | $num_feat features. nthread: $nthread"
seed!(123)
x_train = rand(T, nobs, num_feat)
y_train = rand(T, size(x_train, 1))
@info nthread
loss = "mse"
if loss == "mse"
loss_evo = :mse
metric_evo = :mae
elseif loss == "logloss"
loss_evo = :logloss
metric_evo = :logloss
end
@info "EvoTrees"
dtrain = DataFrame(x_train, :auto)
dtrain.y .= y_train  # the calls to EvoTrees.init/fit_evotree below need the target column
target_name = "y"
verbosity = 0
params_evo = EvoTreeRegressor(
loss=loss_evo,
nrounds=nrounds,
alpha=0.5,
lambda=0.0,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=0.5,
nbins=64,
rng=123,
)
@info "EvoTrees CPU"
device = "cpu"
iterated_model = IteratedModel(
model=params_evo,
resampling=Holdout(; fraction_train=0.5),
measures=rmse,
controls=[Step(5),
Patience(200),
NumberLimit(40)],
retrain=false)
mach = machine(iterated_model, dtrain, y_train)
@time fit!(mach);
@info "init"
@time m_df, cache_df = EvoTrees.init(params_evo, dtrain; target_name);
# @info "train - no eval"
# @time m_evo_df = fit_evotree(params_evo, dtrain; target_name, device, verbosity, print_every_n=100);
# @time m_evo_df = fit_evotree(params_evo, dtrain; target_name, device, verbosity, print_every_n=100);
@info "train - eval"
@time m_evo = fit_evotree(params_evo, dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
@time m_evo = fit_evotree(params_evo, dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
# @time m_evo = fit_evotree(params_evo, dtrain; target_name, device);
# @btime fit_evotree($params_evo, $dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
@info "predict"
@time pred_evo = m_evo(dtrain);
@btime m_evo($dtrain);
@info "EvoTrees GPU"
device = "gpu"
@info "train"
@time m_evo = fit_evotree(params_evo, dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
@time m_evo = fit_evotree(params_evo, dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
# @btime m_evo = fit_evotree($params_evo, $dtrain; target_name, device);
# @btime fit_evotree($params_evo, $dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
@info "predict"
@time pred_evo = m_evo(dtrain; device);
@btime m_evo($dtrain; device);
using MLJBase
using MLJModels
using Tables
EvoTreeBooster = @load EvoTreeRegressor
booster = EvoTreeBooster()
X, y = make_regression(1000, 5)
# this works:
mach = machine(booster, X, y) |> fit!
# this doesn't
X, y = make_regression(1_000_000, 100);
@time X = DataFrame(X);
@time X = Tables.rowtable(X);
@time X = Tables.columntable(X);
mach = machine(booster, X, y) |> fit!
schema = Tables.schema(dtrain)
schema.names
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
["Apache-2.0"] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 5045 | using CUDA
using DataFrames
using CSV
using Statistics
using StatsBase: sample
using XGBoost
using EvoTrees
using BenchmarkTools
using Random: seed!
### v.0.15.1
# desktop | 1e6 | depth 11 | cpu: 37.2s
# desktop | 10e6 | depth 11 | cpu
### v0.16.5
# desktop | 1e6 | depth 11 | cpu: 31s gpu: 50 sec | xgboost cpu: 26s
# desktop | 10e6 | depth 11 | cpu 200s gpu: 80 sec | xgboost cpu: 267s
### gpu-hist
# desktop | 1e6 | depth 11 | cpu: Xs gpu: Xs | xgboost cpu: Xs
# desktop | 10e6 | depth 11 | cpu Xs gpu: Xs | xgboost cpu: Xs
run_evo = true
run_xgb = true
max_nrounds = 200
tree_type = "binary"
T = Float64
nthreads = Base.Threads.nthreads()
device_list = ["cpu", "gpu"]
# device_list = ["gpu"]
nobs_list = Int.([1e5, 1e6, 1e7])
# nobs_list = Int.([1e4, 1e5])
nfeats_list = [10, 100]
# nfeats_list = [10]
max_depth_list = [6, 11]
# max_depth_list = [6]
for device in device_list
df = DataFrame()
for nobs in nobs_list
for nfeats in nfeats_list
for max_depth in max_depth_list
_df = DataFrame(
:device => device,
:nobs => nobs,
:nfeats => nfeats,
:max_depth => max_depth)
@info "device: $device | nobs: $nobs | nfeats: $nfeats | max_depth : $max_depth | nthreads: $nthreads | tree_type : $tree_type"
seed!(123)
x_train = rand(T, nobs, nfeats)
y_train = rand(T, size(x_train, 1))
loss = "mse"
if loss == "mse"
loss_xgb = "reg:squarederror"
metric_xgb = "mae"
loss_evo = :mse
metric_evo = :mae
elseif loss == "logloss"
loss_xgb = "reg:logistic"
metric_xgb = "logloss"
loss_evo = :logloss
metric_evo = :logloss
end
tree_method = device == "gpu" ? "gpu_hist" : "hist"
if run_evo
@info "EvoTrees"
verbosity = 1
params_evo = EvoTreeRegressor(;
loss=loss_evo,
nrounds=max_nrounds,
alpha=0.5,
lambda=0.0,
gamma=0.0,
eta=0.05,
max_depth=max_depth,
min_weight=1.0,
rowsample=0.5,
colsample=0.5,
nbins=64,
tree_type,
rng=123
)
@info "train - eval"
@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, device, verbosity, print_every_n=100)
t_train_evo = @elapsed m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, device, verbosity, print_every_n=100)
@info "predict"
@time pred_evo = m_evo(x_train; device)
t_infer_evo = @elapsed pred_evo = m_evo(x_train; device)
_df = hcat(_df, DataFrame(
:train_evo => t_train_evo,
:infer_evo => t_infer_evo)
)
end
if run_xgb
@info "XGBoost"
params_xgb = Dict(
:num_round => max_nrounds,
:max_depth => max_depth - 1,
:eta => 0.05,
:objective => loss_xgb,
:print_every_n => 5,
:subsample => 0.5,
:colsample_bytree => 0.5,
:tree_method => tree_method, # hist/gpu_hist
:max_bin => 64,
)
@info "train"
dtrain = DMatrix(x_train, y_train)
watchlist = Dict("train" => DMatrix(x_train, y_train))
m_xgb = xgboost(dtrain; watchlist, nthread=nthreads, verbosity=0, eval_metric=metric_xgb, params_xgb...)
t_train_xgb = @elapsed m_xgb = xgboost(dtrain; watchlist, nthread=nthreads, verbosity=0, eval_metric=metric_xgb, params_xgb...)
@info "predict"
pred_xgb = XGBoost.predict(m_xgb, x_train)
t_infer_xgb = @elapsed pred_xgb = XGBoost.predict(m_xgb, x_train)
_df = hcat(_df, DataFrame(
:train_xgb => t_train_xgb,
:infer_xgb => t_infer_xgb)
)
end
append!(df, _df)
end
end
end
select!(df, Cols(:device, :nobs, :nfeats, :max_depth, r"train_", r"infer_"))
path = joinpath(@__DIR__, "results", "regressor-$device.csv")
CSV.write(path, df)
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
["Apache-2.0"] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2752 | using Revise
using Statistics
using StatsBase: sample
using XGBoost
using EvoTrees
using BenchmarkTools
import CUDA
nobs = Int(1e6)
num_feat = Int(100)
nrounds = 200
num_class = 5
verbosity = 1
T = Float64
nthread = Base.Threads.nthreads()
@info "testing with: $nobs observations | $num_feat features. nthread: $nthread"
x_train = rand(T, nobs, num_feat)
y_train = rand(1:num_class, size(x_train, 1))
loss_xgb = "multi:softmax"
metric_xgb = "mlogloss"
metric_evo = :mlogloss
# xgboost aprams
params_xgb = Dict(
:num_round => nrounds,
:max_depth => 5,
:eta => 0.05,
:objective => loss_xgb,
:print_every_n => 5,
:subsample => 0.5,
:colsample_bytree => 0.5,
:tree_method => "hist",
:max_bin => 64,
:num_class => num_class,
)
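# XGBoost expects 0-based class labels, hence the `y_train .- 1` below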
@info "xgboost train:"
metrics = [metric_xgb]
dtrain = DMatrix(x_train, y_train .- 1);
watchlist = Dict("train" => DMatrix(x_train, y_train .- 1))
@time m_xgb = xgboost(dtrain; watchlist, nthread=nthread, verbosity=0, params_xgb...);
@info "xgboost predict:"
@time pred_xgb = XGBoost.predict(m_xgb, x_train);
# @btime XGBoost.predict($m_xgb, $x_train);
# EvoTrees params
params_evo = EvoTreeClassifier(;
nrounds=200,
alpha=0.5,
lambda=0.0,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=0.5,
nbins=64)
@info "EvoTrees CPU"
device = "cpu"
# @info "train - no eval"
# @time m_evo = fit_evotree(params_evo; x_train, y_train, device, verbosity, print_every_n=100);
# @time m_evo = fit_evotree(params_evo; x_train, y_train, device, verbosity, print_every_n=100);
@info "train - eval"
@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, device, print_every_n=100);
@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, device, print_every_n=100);
@info "evotrees predict CPU:"
@time pred_evo = m_evo(x_train);
@btime m_evo($x_train);
@info "evotrees train GPU:"
device = "gpu"
# @info "train - no eval"
# @time m_evo = fit_evotree(params_evo; x_train, y_train, device, verbosity, print_every_n=100);
# @time m_evo = fit_evotree(params_evo; x_train, y_train, device, verbosity, print_every_n=100);
@info "train - eval"
@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, device, print_every_n=100);
@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, device, print_every_n=100);
# @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=metric_evo);
@info "evotrees predict GPU:"
@time pred_evo = m_evo(x_train; device);
@btime m_evo($x_train; device);
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
["Apache-2.0"] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 946 | using Statistics
using StatsBase: sample
using XGBoost
using EvoTrees
using BenchmarkTools
# prepare a dataset
X = rand(Int(2.e6), 100)
Y = rand(size(X, 1))
#######################
# EvoTrees
#######################
config = EvoTreeRegressor(
loss=:linear, metric=:none,
nrounds=100,
λ = 0.0, γ=0.0, η=0.05,
max_depth = 6, min_weight = 1.0,
rowsample=0.5, colsample=0.5, nbins=32)
@time model = fit_evotree(config, X, Y);
@time pred = EvoTrees.predict(model, X)
#######################
# xgboost
#######################
num_round = 100
param = ["max_depth" => 5,
"eta" => 0.05,
"objective" => "reg:linear",
"print_every_n" => 5,
"subsample" => 0.5,
"colsample_bytree" => 0.5,
"tree_method" => "hist",
"max_bin" => 32]
metrics = ["rmse"]
@time model_xgb = xgboost(X, num_round, label = Y, param = param, silent=1);
@time pred = XGBoost.predict(model_xgb, X)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
["Apache-2.0"] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 13767 | using Statistics
using StatsBase: sample, quantile
using EvoTrees
using EvoTrees: sigmoid, logit
using Plots
using GraphRecipes
using Random: seed!
using StaticArrays
# prepare a dataset
seed!(123)
X = rand(1_000, 2) .* 2
Y = sin.(X[:,1] .* π) .+ X[:,2]
Y = Y .+ randn(size(Y)) .* 0.1 #logit(Y)
# Y = sigmoid(Y)
𝑖 = collect(1:size(X,1))
# make a grid
grid_size = 101
range = 2
X_grid = zeros(grid_size^2,2)
for j in 1:grid_size
for i in 1:grid_size
X_grid[grid_size*(j-1) + i,:] .= [(i-1) / (grid_size-1) * range, (j-1) / (grid_size-1) * range]
end
end
Y_grid = sin.(X_grid[:,1] .* π) .+ X_grid[:,2]
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
X_train, X_eval = X[𝑖_train, :], X[𝑖_eval, :]
Y_train, Y_eval = Y[𝑖_train], Y[𝑖_eval]
# linear
params1 = EvoTreeRegressor(T=Float64,
loss=:linear, metric=:mse,
nrounds=100, nbins = 16,
λ=0.0, γ=0.0, η=0.05,
max_depth = 3, min_weight = 1.0,
rowsample=0.8, colsample=1.0)
edges = EvoTrees.get_edges(X_train, params1.nbins)
X_bin = EvoTrees.binarize(X_train, edges)
@time model = fit_evotree(params1, X_train, Y_train, X_eval = X_eval, Y_eval = Y_eval, print_every_n = 25)
# @btime model = grow_gbtree($X_train, $Y_train, $params1, X_eval = $X_eval, Y_eval = $Y_eval)
using BSON: @save
@save "blog/model_linear.bson" model
# @btime model = grow_gbtree($X_train, $Y_train, $params1, X_eval = $X_eval, Y_eval = $Y_eval, print_every_n = 25, metric=:mae)
@time pred_train_linear = predict(model, X_train)
# @time pred_eval_linear = predict(model, X_eval)
# mean(abs.(pred_train_linear .- Y_train))
# sqrt(mean((pred_train_linear .- Y_train) .^ 2))
μ = mean(Y_train)
x_perm_1 = sortperm(X_train[:,1])
x_perm_2 = sortperm(X_train[:,2])
p1 = plot(X_train[:,1], Y_train, ms = 3, zcolor=Y_train, color=cgrad(["darkred", "#33ccff"]), msw=0, background_color = "white", seriestype=:scatter, xaxis = ("var1"), yaxis = ("target"), leg = false, cbar=true)
p1_bin = plot(X_bin[:,1], Y_train, ms = 3, zcolor=Y_train, color=cgrad(["darkred", "#33ccff"]), msw=0, background_color = RGB(1, 1, 1), seriestype=:scatter, xaxis = ("var1"), yaxis = ("target"), legend = false, cbar=true, label = "")
plot!(fill(μ, params1.nbins), lw=3, color="#66ffcc", background_color = "white", seriestype=:line, label="predict", leg=true)
# savefig(p1, "var1.svg")
# plot!(X_train[:,1][x_perm_1], pred_train_linear[x_perm_1], color = "red", mswidth=0, msize=3, label = "Linear", st=:scatter, leg=false)
p2 = plot(X_train[:,2], Y_train, ms = 3, zcolor=Y_train, color=cgrad(["darkred", "#33ccff"]), msw=0, background_color = "white", seriestype=:scatter, xaxis = ("var2"), yaxis = ("target"), leg = false, cbar=true)
p2_bin = plot(X_bin[:,2], Y_train, ms = 3, zcolor=Y_train, color=cgrad(["darkred", "#33ccff"]), msw=0, background_color = "white", seriestype=:scatter, xaxis = ("var2"), yaxis = ("target"), leg = false, cbar=true, label="")
plot!(fill(μ, params1.nbins), lw=3, color="#66ffcc", background_color = "white", seriestype=:line, label="predict", leg = true)
# savefig(p2, "var2.svg")
# plot!(X_train[:,2][x_perm_2], pred_train_linear[x_perm_2], color = "red", mswidth=0, msize=3, st=:scatter, label = "Predict")
p = plot(p1,p2, layout=(2,1))
savefig(p, "blog/raw_one_ways.svg")
savefig(p, "blog/raw_one_ways.png")
p = plot(p1_bin, p2_bin, layout=(2,1))
savefig(p, "blog/bin_one_ways.svg")
savefig(p, "blog/bin_one_ways.png")
# train iteration
# plot left vs right points
left_id = X_bin[:,2] .== 1
p = plot(X_bin[left_id,2], Y_train[left_id], ms = 3, color="darkred", msw=0, background_color = "white", seriestype=:scatter, xaxis = ("var2"), yaxis = ("residual"), leg = true, cbar=true, label="left")
plot!(X_bin[.!left_id,2], Y_train[.!left_id], ms = 3, color="#33ccff", msw=0, background_color = "white", seriestype=:scatter, xaxis = ("var2"), yaxis = ("residual"), leg = true, cbar=true, label="right")
plot!(fill(μ, params1.nbins), lw=3, color="#66ffcc", background_color = "white", seriestype=:line, label="predict", leg = true)
# residuals
residuals = Y_train .- μ
left_id = X_bin[:,2] .== 1
p = plot(X_bin[left_id,2], residuals[left_id], ms = 3, color="darkred", msw=0, background_color = "white", seriestype=:scatter, xaxis = ("var2"), yaxis = ("residual"), leg = true, cbar=true, label="left")
plot!(X_bin[.!left_id,2], residuals[.!left_id], ms = 3, color="#33ccff", msw=0, background_color = "white", seriestype=:scatter, xaxis = ("var2"), yaxis = ("residual"), leg = true, cbar=true, label="right")
plot!(fill(0, params1.nbins), lw=3, color="#66ffcc", background_color = "white", seriestype=:line, label="", leg = true)
savefig(p, "blog/first_split.svg")
savefig(p, "blog/first_split.png")
left_res = residuals[left_id]
function loss(x::Vector, val)
sum((x .- val).^2)
end
eval_pts = -2.1:0.01:0.5
left_loss = loss.(Ref(left_res), eval_pts)
p = plot(left_res, left_res .* 0, ms = 5, color="darkred", msw=0, background_color = "white", seriestype=:scatter, xaxis = ("predict"), yaxis = ("loss"), leg = true, cbar=true, label="observation")
# plot!(left_res, left_loss, ms = 3, color="#33ccff", msw=0, background_color = "white", seriestype=:scatter, xaxis = ("residual"), yaxis = ("loss"), leg = true, cbar=true, label="")
plot!(eval_pts, left_loss, lw=3, color="#66ffcc", background_color = "white", seriestype=:line, label="loss", leg = true)
savefig(p, "blog/left_parabole.svg")
savefig(p, "blog/left_parabole.png")
loss(left_res, 0.0) - loss(left_res, mean(left_res))
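# split gain: squared-error loss at the current prediction (residual 0) minus loss at the best constant fit (the mean)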
# add single pt parabol
left_res_1 = sort(left_res)[35]
left_loss_1 = loss.(Ref([left_res_1]), eval_pts)
plot!(eval_pts, left_loss_1, lw=0.5, color="#33ccff", background_color = "white", seriestype=:line, label="", leg = true)
###########################
# raw compute
###########################
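# δ / δ² hold the per-observation first- and second-order gradients and 𝑤 the
# observation weights; predictions are initialized at the bias μ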
𝑖 = collect(1:size(X_train,1))
δ, δ² = zeros(SVector{model.K, Float64}, size(X_train, 1)), zeros(SVector{model.K, Float64}, size(X_train, 1))
𝑤 = zeros(SVector{1, Float64}, size(X_train, 1)) .+ 1
pred = zeros(SVector{model.K,Float64}, size(X_train,1)) .+ μ
EvoTrees.update_grads!(params1.loss, params1.α, pred, Y_train, δ, δ², 𝑤)
∑δ, ∑δ², ∑𝑤 = sum(δ[left_id]), sum(δ²[left_id]), sum(𝑤[left_id])
gain = EvoTrees.get_gain(params1.loss, ∑δ, ∑δ², ∑𝑤, params1.λ)
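# get_gain evaluates the usual second-order split gain, roughly of the form
# (∑δ)² / (∑δ² + λ∑𝑤); see EvoTrees.get_gain for the exact expression in this version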
#####################################
# 3D visualisation of data
#####################################
# p = plot(X_train[:,1], X_train[:,2], Y_train, zcolor=Y_train, color=cgrad(["red","#3399ff"]), msize=5, markerstrokewidth=0, leg=false, cbar=true, w=1, st=:scatter)
p = plot(X_grid[:,1], X_grid[:,2], Y_grid, zcolor=Y_grid, color=cgrad(["#555555", "#eeeeee"]), msize=5, markerstrokewidth=0, leg=false, cbar=true, st=:scatter, xaxis="var1", yaxis="var2")
plot!(X_train[:,1], X_train[:,2], Y_train, zcolor=Y_train, color=cgrad(["darkred", "#33ccff"]), msize=4, markerstrokewidth=0, st=:scatter)
savefig(p, "blog/data_3D.svg")
savefig(p, "blog/data_3D.png")
plot(X_train[:,1], X_train[:,2], pred_train_linear, zcolor=Y_train, m=(5, 0.9, :rainbow, Plots.stroke(0)), leg=false, cbar=true, w=1, st=:scatter)
plot(X_train[:,1], X_train[:,2], pred_train_linear, zcolor=Y_train, st=[:surface], leg=false, cbar=true, fillcolor=:rainbow, markeralpha=1.0)
p_bin = plot(X_bin[:,1], X_bin[:,2], Y_train, zcolor=Y_train, color=cgrad(["darkred", "#33ccff"]), msize=4, markerstrokewidth=0, st=:scatter, leg=false, cbar=true)
gr()
params1 = EvoTreeRegressor(
loss=:linear, metric=:mse,
nrounds=100, nbins = 100,
λ = 0.0, γ=0.0, η=0.5,
max_depth = 2, min_weight = 1.0,
rowsample=0.5, colsample=1.0)
anim = @animate for i=1:20
params1.nrounds = (i-1)*5+1
model = fit_evotree(params1, X_train, Y_train, X_eval = X_eval, Y_eval = Y_eval, print_every_n = Inf)
pred_train_linear = predict(model, X_train)
x_perm = sortperm(X_train[:,1])
    plot(X_train[:,1], Y_train, ms = 1, mcolor = "gray", mscolor = "lightgray", background_color = RGB(1, 1, 1), seriestype=:scatter, xaxis = ("feature"), yaxis = ("target"), legend = true, label = "")
plot!(X_train[:,1][x_perm], pred_train_linear[x_perm], color = "navy", linewidth = 1.5, label = "Linear")
end
gif(anim, "blog/anim_fps1.gif", fps = 1)
# treevec: build source/target edge vectors from a fitted tree for graphplot
function treevec(tree)
source, target = zeros(Int, max(1, length(tree.nodes)-1)), zeros(Int, max(1, length(tree.nodes)-1))
count_s, count_t = 1, 1
for i in 1:length(tree.nodes)
if tree.nodes[i].split
source[count_s] = i
source[count_s+1] = i
target[count_t] = tree.nodes[i].left
target[count_t+1] = tree.nodes[i].right
count_s += 2
count_t += 2
        elseif i == 1
source[i] = i
target[i] = i
end
end
return source, target
end
# plot tree
function nodenames(tree)
names = []
for i in 1:length(tree.nodes)
if tree.nodes[i].split
push!(names, "feat: " * string(tree.nodes[i].feat) * "\n< " * string(round(tree.nodes[i].cond, sigdigits=3)))
else
push!(names, "pred:\n" * string(round(tree.nodes[i].pred[1], sigdigits=3)))
end
end
return names
end
tree1 = model.trees[2]
source, target = treevec(tree1)
nodes = nodenames(tree1)
seed!(1)
# p1 = graphplot(source, target, method=:tree, names = nodes, linecolor=:brown, nodeshape=:hexagon, fontsize=8, fillcolor="#66ffcc")
p1 = graphplot(source, target, method=:buchheim, names = nodes, linecolor=:brown, nodeshape=:hexagon, fontsize=8, nodecolor="#66ffcc")
tree1 = model.trees[3]
source, target = treevec(tree1)
nodes = nodenames(tree1)
seed!(1)
p2 = graphplot(source, target, method=:buchheim, names = nodes, linecolor=:brown, nodeshape=:hexagon, fontsize=8, nodecolor="#66ffcc")
tree1 = model.trees[50]
source, target = treevec(tree1)
nodes = nodenames(tree1)
seed!(1)
p3 = graphplot(source, target, method=:buchheim, names = nodes, linecolor=:brown, nodeshape=:hexagon, fontsize=8, nodecolor="#66ffcc")
tree1 = model.trees[90]
source, target = treevec(tree1)
nodes = nodenames(tree1)
seed!(1)
p4 = graphplot(source, target, method=:buchheim, names = nodes, edgecolor=:black, nodeshape=:hexagon, fontsize=8, nodecolor="#66ffcc")
default(size=(1600, 1600))
seed!(1)
fills = sample(["#33ffcc", "#99ccff"], length(source), replace=true)
fills = sample(["lightgray", "#33ffcc"], length(source), replace=true)
p4 = graphplot(source, target, method=:buchheim, root=:top, names = nodes, edgecolor=:black, nodeshape=:hexagon, fontsize=9, axis_buffer=0.05, nodesize=0.025, nodecolor=fills)
p = plot(p1,p2,p3,p4)
savefig(p, "blog/tree_group.svg")
savefig(p1, "blog/tree_1.svg")
savefig(p, "blog/tree_group.png")
savefig(p1, "blog/tree_1.png")
################################
# animation on one way plot
################################
# prepare a dataset
features = rand(10_000) .* 5
X = reshape(features, (size(features)[1], 1))
Y = sin.(features) .* 0.5 .+ 0.5
Y = logit(Y) + randn(size(Y))
Y = sigmoid(Y)
𝑖 = collect(1:size(X,1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
X_train, X_eval = X[𝑖_train, :], X[𝑖_eval, :]
Y_train, Y_eval = Y[𝑖_train], Y[𝑖_eval]
# linear
params1 = EvoTreeRegressor(
loss=:linear, metric=:mse,
nrounds=10, nbins = 32,
λ = 0.0, γ=0.0, η=0.5,
max_depth = 3, min_weight = 1.0,
rowsample=0.5, colsample=1.0)
@time model = fit_evotree(params1, X_train, Y_train, X_eval = X_eval, Y_eval = Y_eval, print_every_n = 25)
anim = @animate for i=0:4
params1.nrounds = i
# i == 1 ? params1.η = 0.001 : params1.η = 0.5
model = fit_evotree(params1, X_train, Y_train, X_eval = X_eval, Y_eval = Y_eval, print_every_n = Inf)
pred_train_linear = predict(model, X_train)
x_perm = sortperm(X_train[:,1])
    plot(X_train[:,1], Y_train, ms = 1, mcolor = "gray", mscolor = "lightgray", background_color = RGB(1, 1, 1), seriestype=:scatter, xaxis = ("feature"), yaxis = ("target"), legend = true, label = "")
plot!(X_train[:,1][x_perm], pred_train_linear[x_perm], color = "navy", linewidth = 1.5, label = "Linear")
end
gif(anim, "blog/anim_fps1.gif", fps = 1)
ps = [p1,p2,p3,p4]
anim = @animate for i=0:4
# i == 1 ? params1.η = 0.001 : params1.η = 0.5
# if i == 1
# p = plot(p1)
# elseif i ==2
# p = plot(p1,p2)
# end
if i==0
plot(foreground_color_subplot=:white)
else
plot(ps[1:i]...)
end
end
gif(anim, "blog/anim_tree.gif", fps = 1)
tree1 = model.trees[2]
source, target = treevec(tree1)
nodes = nodenames(tree1)
seed!(1)
p1 = graphplot(source, target, method=:tree, names = nodes, edgecolor=:brown, nodeshape=:hexagon, fontsize=8, nodecolor="#66ffcc", nodestrokewidth=0)
tree1 = model.trees[3]
source, target = treevec(tree1)
nodes = nodenames(tree1)
seed!(1)
p2 = graphplot(source, target, method=:tree, names = nodes, edgecolor=:brown, nodeshape=:hexagon, fontsize=8, nodecolor="#66ffcc", nodestrokewidth=0)
tree1 = model.trees[4]
source, target = treevec(tree1)
nodes = nodenames(tree1)
seed!(1)
p3 = graphplot(source, target, method=:tree, names = nodes, edgecolor=:brown, nodeshape=:hexagon, fontsize=8, nodecolor="#66ffcc", nodestrokewidth=0)
tree1 = model.trees[5]
source, target = treevec(tree1)
nodes = nodenames(tree1)
seed!(1)
p4 = graphplot(source, target, method=:tree, names = nodes, edgecolor=:brown, nodeshape=:hexagon, fontsize=8, nodecolor="#66ffcc", nodestrokewidth=0)
p = plot(p1,p2,p3,p4)
savefig(p, "blog/tree_group.svg")
savefig(p1, "blog/tree_1.svg")
savefig(p, "blog/tree_group.png")
savefig(p1, "blog/tree_1.png")
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 1008 | push!(LOAD_PATH, "../src/")
using Documenter
using EvoTrees
pages = [
"Introduction" => "index.md",
"Models" => "models.md",
"API" => [
"Public" => "api.md",
"Internals" => "internals.md"
],
"Tutorials" => [
"Regression - Boston" => "tutorials/regression-boston.md",
"Logistic Regression - Titanic" => "tutorials/logistic-regression-titanic.md",
"Classification - IRIS" => "tutorials/classification-iris.md",
"Ranking - Yahoo! LTRC" => "tutorials/ranking-LTRC.md",
"Internal API" => "tutorials/examples-API.md",
"MLJ API" => "tutorials/examples-MLJ.md"]
]
makedocs(
sitename="EvoTrees.jl",
authors="Jeremie Desgagne-Bouchard and contributors.",
format=Documenter.HTML(
sidebar_sitename=false,
edit_link="main",
assets=["assets/style.css"]
),
pages=pages,
modules=[EvoTrees],
)
deploydocs(repo="github.com/Evovest/EvoTrees.jl.git",
target="build",
devbranch="main") | EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 9000 | using Revise
using Tables
using DataFrames
using MLJBase
using StatsBase: sample, mean, quantile
using Statistics
using CategoricalArrays
using Distributions
using EvoTrees
using EvoTrees: logit, sigmoid
##################################################
### Regression - small data
##################################################
features = rand(10_000) .* 5 .- 2
X = reshape(features, (size(features)[1], 1))
Y = sin.(features) .* 0.5 .+ 0.5
Y = logit(Y) + randn(size(Y))
Y = sigmoid(Y)
y = Y
X = Tables.table(X)
X = DataFrame(X)
# @load EvoTreeRegressor
# linear regression
tree_model = EvoTreeRegressor(max_depth = 5, eta = 0.05, nrounds = 5, rowsample = 0.5)
# logistic regression
# tree_model = EvoTreeRegressor(loss = :logistic, max_depth = 5, eta = 0.05, nrounds = 10)
# quantile regression
# tree_model = EvoTreeRegressor(
# loss = :quantile,
# alpha = 0.75,
# max_depth = 5,
# eta = 0.05,
# nrounds = 10,
# )
tree = machine(tree_model, X, y)
train, test = partition(eachindex(y), 0.7, shuffle = true); # 70:30 split
fit!(tree, rows = train, verbosity = 1);
tree.model.nrounds += 5
fit!(tree, rows = train, verbosity = 1);
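# note: increasing `nrounds` on the wrapped model and calling `fit!` again
# warm-restarts training, adding trees instead of refitting from scratch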
# predict on train data
pred_train = predict(tree, selectrows(X, train))
println(mean(abs.(pred_train - selectrows(Y, train))))
# predict on test data
pred_test = predict(tree, selectrows(X, test))
println(mean(abs.(pred_test - selectrows(Y, test))))
##################################################
### classif
##################################################
X, y_train = @load_crabs
x_train = matrix(X)
using CUDA
CUDA.allowscalar(false)
# define hyperparameters
config = EvoTreeClassifier(
max_depth = 4,
eta = 0.05,
lambda = 0.0,
gamma = 0.0,
nbins = 32,
nrounds = 200,
)
model = fit_evotree(config; x_train, y_train);
model = fit_evotree(config; x_train, y_train, x_eval = x_train, y_eval = y_train, metric=:mlogloss, print_every_n=10, early_stopping_rounds=25);
pred = model(x_train)
pred_cat = pred .> 0.5
sum((y_train .== "B") .== pred_cat[:, 1]) / length(y_train)
# @load EvoTreeRegressor
mach = machine(config, X, y_train)
train, test = partition(eachindex(y_train), 0.7, shuffle = true); # 70:30 split
fit!(mach, rows = train, verbosity = 1)
rpt = report(mach)
MLJBase.feature_importances(config, mach.fitresult, rpt)
mach.model.nrounds += 10
fit!(mach, rows = train, verbosity = 1)
rpt = report(mach)
MLJBase.feature_importances(config, mach.fitresult, rpt)
pred_train = EvoTrees.predict(mach, selectrows(X, train))
pred_train_mode = predict_mode(mach, selectrows(X, train))
println(cross_entropy(pred_train, selectrows(y_train, train)) |> mean)
println(sum(pred_train_mode .== y_train[train]) / length(train))
pred_test = EvoTrees.predict(mach, selectrows(X, test))
pred_test_mode = predict_mode(mach, selectrows(X, test))
println(cross_entropy(pred_test, selectrows(y_train, test)) |> mean)
println(sum(pred_test_mode .== y_train[test]) / length(test))
# using LossFunctions, Plots
# evo_model = EvoTreeClassifier(max_depth=6, η=0.05, λ=1.0, γ=0.0, nrounds=10, nbins=64)
# evo = machine(evo_model, X, y)
# r = range(evo_model, :nrounds, lower=1, upper=500)
# @time curve = learning_curve!(evo, range=r, resolution=10, measure=HingeLoss())
# plot(curve.parameter_values, curve.measurements)
##################################################
### regression - Larger data
##################################################
features = rand(1_000_000, 100)
# features = rand(100, 10)
X = features
Y = rand(size(X, 1))
𝑖 = collect(1:size(X, 1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
X_train, X_eval = X[𝑖_train, :], X[𝑖_eval, :]
Y_train, Y_eval = Y[𝑖_train], Y[𝑖_eval]
# @load EvoTreeRegressor
tree_model = EvoTreeRegressor(
loss = :linear,
metric = :mae,
nrounds = 10,
λ = 0.0,
γ = 0.0,
η = 0.1,
max_depth = 6,
min_weight = 1.0,
rowsample = 0.5,
colsample = 0.5,
nbins = 32,
)
X = Tables.table(X);
X = Tables.rowtable(X);
# X = Tables.columntable(X)
# X_matrix = MLJBase.matrix(X)
# typeof(X)
@time tree = machine(tree_model, X, Y);
train, test = partition(eachindex(Y), 0.8, shuffle = true); # 80:20 split
@time fit!(tree, rows = train, verbosity = 1, force = true)
using LossFunctions
using MLJ
r = range(tree_model, :nrounds, lower = 1, upper = 100)
m = rms
@time curve = learning_curve!(tree, range = r, resolution = 100, measure = m)
tree.model.nrounds += 1
@time MLJBase.update(tree.model, 0, tree.fitresult, tree.cache, X, Y);
tree.model.nrounds += 1
@time fit!(tree, rows = train, verbosity = 1)
# @time MLJBase.fit!(tree, rows=train, verbosity=1)
# yhat = MLJBase.predict(tree.model, tree.fitresult, MLJ.selectrows(X,test))
pred_train = predict(tree, selectrows(X, train))
mean(abs.(pred_train - selectrows(Y, train)))
##################################################
### count - Larger data
##################################################
features = rand(100_000, 100)
# features = rand(100, 10)
X = features
Y = rand(UInt8, size(X, 1))
𝑖 = collect(1:size(X, 1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
X_train, X_eval = X[𝑖_train, :], X[𝑖_eval, :]
Y_train, Y_eval = Y[𝑖_train], Y[𝑖_eval]
# @load EvoTreeRegressor
tree_model = EvoTreeCount(
loss = :poisson,
metric = :poisson,
nrounds = 10,
λ = 0.0,
γ = 0.0,
η = 0.1,
max_depth = 6,
min_weight = 1.0,
rowsample = 0.5,
colsample = 0.5,
nbins = 32,
)
X = Tables.table(X)
X = Tables.rowtable(X)
X = Tables.columntable(X)
X_matrix = MLJBase.matrix(X)
# typeof(X)
@time tree = machine(tree_model, X, Y)
train, test = partition(eachindex(Y), 0.8, shuffle = true); # 80:20 split
@time fit!(tree, rows = train, verbosity = 1, force = true)
tree.model.nrounds += 10
@time MLJBase.update(tree.model, 0, tree.fitresult, tree.cache, X, Y)
tree.model.nrounds += 10
@time fit!(tree, rows = train, verbosity = 1)
# @time MLJBase.fit!(tree, rows=train, verbosity=1)
# yhat = MLJBase.predict(tree.model, tree.fitresult, MLJ.selectrows(X,test))
pred = predict(tree, selectrows(X, train))
pred_mean = predict_mean(tree, selectrows(X, train))
pred_mode = predict_mode(tree, selectrows(X, train))
##################################################
### Gaussian - Larger data
##################################################
features = rand(100_000, 100)
# features = rand(100, 10)
X = features
Y = rand(size(X, 1))
𝑖 = collect(1:size(X, 1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
X_train, X_eval = X[𝑖_train, :], X[𝑖_eval, :]
Y_train, Y_eval = Y[𝑖_train], Y[𝑖_eval]
# @load EvoTreeRegressor
tree_model = EvoTreeGaussian(
loss = :gaussian,
metric = :gaussian,
nrounds = 10,
λ = 0.0,
γ = 0.0,
η = 0.1,
max_depth = 6,
min_weight = 1.0,
rowsample = 0.5,
colsample = 0.5,
nbins = 32,
)
X = Tables.table(X)
X_matrix = MLJBase.matrix(X)
# typeof(X)
@time tree = machine(tree_model, X, Y)
train, test = partition(eachindex(Y), 0.8, shuffle = true); # 80:20 split
@time fit!(tree, rows = train, verbosity = 1, force = true)
tree.model.nrounds += 10
@time MLJBase.update(tree.model, 0, tree.fitresult, tree.cache, X, Y)
tree.model.nrounds += 10
@time fit!(tree, rows = train, verbosity = 1)
# @time MLJBase.fit!(tree, rows=train, verbosity=1)
# yhat = MLJBase.predict(tree.model, tree.fitresult, MLJ.selectrows(X,test))
pred = predict(tree, selectrows(X, train))
pred_mean = predict_mean(tree, selectrows(X, train))
pred_mode = predict_mode(tree, selectrows(X, train))
mean(abs.(pred_mean - selectrows(Y, train)))
q_20 = quantile.(pred, 0.20)
q_20 = quantile.(pred, 0.80)
#########################################
# MLJ2 test
#########################################
using EvoTrees
using MLJModelInterface
using MLJBase
using StatsBase: sample, mean, quantile
using Tables
X = rand(1_000_000, 100);
Y = rand(size(X, 1))
# @load EvoTreeRegressor
tree_model = EvoTreeRegressor(
loss = :linear,
metric = :mae,
nrounds = 10,
λ = 0.0,
γ = 0.0,
η = 0.1,
max_depth = 6,
min_weight = 1.0,
rowsample = 0.5,
colsample = 0.5,
nbins = 32,
)
X = Tables.table(X);
# X = Tables.rowtable(X);
# X = Tables.columntable(X);
# X_matrix = MLJBase.matrix(X);
# typeof(X)
@time tree = machine(tree_model, X, Y);
train, test = partition(eachindex(Y), 0.8, shuffle = true); # 80:20 split
@time fit!(tree, rows = train, verbosity = 1, force = false)
tree.model.nrounds += 1
@time fit!(tree, rows = train, verbosity = 1)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2415 | using Revise
using EvoTrees
using DataFrames
using CategoricalArrays: categorical
import CUDA
using Base.Threads: nthreads, @threads
using BenchmarkTools
using Random: seed!
# using StatsBase
# x1 = rand(Bool, 10)
# nbins = 2
# edges = sort(unique(quantile(skipmissing(x1), (1:nbins-1) / nbins)))
# searchsortedfirst(edges, edges[1])
# searchsortedfirst(edges, 1.0)
# searchsortedfirst(edges, edges[9] + 0.01)
seed!(123)
nrounds = 20
nobs = Int(1e6)
nfeats_num = Int(100)
T = Float32
nthread = Base.Threads.nthreads()
@info "testing with: $nobs observations | $nfeats_num features."
x_train = rand(T, nobs, nfeats_num);
y_train = rand(T, nobs);
dtrain = DataFrame(x_train, :auto);
dtrain[:, :y] = y_train;
# dtrain[:, :x_cat_1] = rand(["lvl1", "lvl2", "lvl3"], nobs);
# transform!(dtrain, "x_cat_1" => (x -> categorical(x, ordered = false)) => "x_cat_1")
# levels(dtrain.x_cat_1)
# levelcode.(dtrain.x_cat_1)
# isordered(dtrain.x_cat_1)
# eltype.(eachcol(dtrain))
# typeof.(eachcol(dtrain))
# @time for col in eachcol(dtrain)
# @info typeof(col)
# end
# @time for name in names(dtrain)
# @info typeof(dtrain[:, name])
# end
@info nthread
loss = "linear"
if loss == "linear"
loss_evo = :mse
metric_evo = :mae
elseif loss == "logistic"
loss_evo = :logloss
metric_evo = :logloss
end
hyper = EvoTreeRegressor(
T=T,
loss=loss_evo,
nrounds=nrounds,
alpha=0.5,
lambda=0.0,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=0.5,
nbins=64,
rng=123,
)
target_name = "y"
device = "cpu"
@info "init"
@time model, cache = EvoTrees.init(hyper, dtrain; target_name);
@info "pred"
@time pred = model(dtrain);
# cache.edges[11]
# cache.featbins
# cache.feattypes
# cache.nodes[1].gains[1]
# model.trees[1]
@info "grow_evotree!"
@time EvoTrees.grow_evotree!(model, cache, hyper, EvoTrees.CPU);
# @btime EvoTrees.grow_evotree!(model, cache, hyper);
@info "fit_evotree"
@time m = fit_evotree(hyper, dtrain; target_name, device, verbosity=false);
# @btime fit_evotree(hyper, dtrain; target_name, verbosity = false);
@time m = fit_evotree(hyper, dtrain; target_name, deval=dtrain, metric=metric_evo, device, print_every_n=100, verbosity=false);
# @btime m = fit_evotree(hyper, dtrain; target_name, deval=dtrain, metric=metric_evo, device, print_every_n=100, verbosity = false);
@time pred = m(dtrain);
# @btime m($dtrain);
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2096 | using Revise
using EvoTrees
using CSV
using DataFrames
using CategoricalArrays
import CUDA
using Base.Iterators: partition
using Base.Threads: nthreads, @threads
using BenchmarkTools
using Random: seed!
# using StatsBase
# x1 = rand(Bool, 10)
# nbins = 2
# edges = sort(unique(quantile(skipmissing(x1), (1:nbins-1) / nbins)))
# searchsortedfirst(edges, edges[1])
# searchsortedfirst(edges, 1.0)
# searchsortedfirst(edges, edges[9] + 0.01)
seed!(123)
nrounds = 20
nobs = Int(1e6)
nfeats_num = Int(100)
T = Float32
nthread = Base.Threads.nthreads()
@info "testing with: $nobs observations | $nfeats_num features."
x_train = rand(T, nobs, nfeats_num);
y_train = rand(T, nobs);
dtrain = DataFrame(x_train, :auto);
dtrain[:, :y] = y_train;
# dtrain[:, :x_cat_1] = rand(["lvl1", "lvl2", "lvl3"], nobs);
# transform!(dtrain, "x_cat_1" => (x -> categorical(x, ordered=false)) => "x_cat_1")
@info nthread
loss = "linear"
if loss == "linear"
loss_evo = :mse
metric_evo = :mae
elseif loss == "logistic"
loss_evo = :logloss
metric_evo = :logloss
end
hyper = EvoTreeRegressor(
T=T,
loss=loss_evo,
nrounds=nrounds,
alpha=0.5,
lambda=0.0,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=0.5,
nbins=64,
rng=123,
)
target_name = "y"
device = "gpu"
CUDA.allowscalar(false)
@time model, cache = EvoTrees.init(hyper, dtrain, EvoTrees.GPU; target_name);
CUDA.@time model, cache = EvoTrees.init(hyper, dtrain, EvoTrees.GPU; target_name);
@time EvoTrees.grow_evotree!(model, cache, hyper, EvoTrees.GPU);
@btime EvoTrees.grow_evotree!(model, cache, hyper, EvoTrees.GPU);
@time m = fit_evotree(hyper, dtrain; target_name, device, verbosity=false);
# @btime fit_evotree(hyper, dtrain; target_name, verbosity = false);
@time m = fit_evotree(hyper, dtrain; target_name, deval=dtrain, metric=metric_evo, device, print_every_n=100, verbosity=false);
@btime m = fit_evotree(hyper, dtrain; target_name, deval=dtrain, metric=metric_evo, device, print_every_n=100, verbosity=false);
@time pred = m(dtrain);
@btime m($dtrain); | EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2891 | using Plots
using Statistics
using StatsBase: sample
using Distributions
using Revise
using EvoTrees
features = rand(Int(1.25e4), 1)
# prepare a dataset
# features = rand(100, 10)
X = features
Y = randn(size(X, 1)) .* 0.1
Y[X[:,1] .< 0.2] .*= 2
Y[(X[:,1] .>= 0.4) .& (X[:,1] .< 0.6)] .*= 5
Y[(X[:,1] .>= 0.9)] .*= 5
𝑖 = collect(1:size(X,1))
Y .*= 0.01
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
X_train, X_eval = X[𝑖_train, :], X[𝑖_eval, :]
Y_train, Y_eval = Y[𝑖_train], Y[𝑖_eval]
# train model
params1 = EvoTreeGaussian(
loss=:gaussian, metric=:gaussian,
nrounds=200,
λ = 1.0, γ=1.0, η=0.5,
max_depth = 4, min_weight = 100.0,
rowsample=1.0, colsample=1.0, nbins=64)
@time model = fit_evotree(params1, X_train, Y_train, X_eval=X_eval, Y_eval=Y_eval, print_every_n = 10);
# @time model = fit_evotree(params1, X_train, Y_train, print_every_n = 10);
@time pred_train = EvoTrees.predict(model, X_train)
@time pred_train_gauss = EvoTrees.predict(params1, model, X_train)
pred_gauss = [Distributions.Normal(pred_train[i,1], pred_train[i,2]) for i in 1:size(pred_train,1)]
pred_q90 = quantile.(pred_gauss, 0.9)
pred_q10 = quantile.(pred_gauss, 0.1)
mean(Y_train .< pred_q90)
mean(Y_train .< pred_q10)
x_perm = sortperm(X_train[:,1])
plot(X_train[:, 1], Y_train, ms = 1, mcolor = "gray", mscolor = "lightgray", background_color = RGB(1, 1, 1), seriestype=:scatter, xaxis = ("feature"), yaxis = ("target"), legend = true, label = "")
plot!(X_train[:,1][x_perm], pred_train[x_perm, 1], color = "navy", linewidth = 1.5, label = "mu")
plot!(X_train[:,1][x_perm], pred_train[x_perm, 2], color = "blue", linewidth = 1.5, label = "sigma")
plot!(X_train[:,1][x_perm], pred_q10[x_perm, 1], color = "darkred", linewidth = 1.5, label = "q10")
plot!(X_train[:,1][x_perm], pred_q90[x_perm, 1], color = "green", linewidth = 1.5, label = "q90")
savefig("figures/regression_gaussian_v1.png")
# compare with zygote
using Zygote
pred = [0.0, log(1.0)]
target = 0.1
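# hand-derived gradients of the Gaussian log-likelihood in the (μ, log σ)
# parameterization (signs follow the boosting update convention); they are
# checked against the Zygote results below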
δ1 = (target - pred[1]) / max(1e-8, exp(2*pred[2]))
δ2 = (1 - (pred[1] - target)^2 / max(1e-8, exp(2*pred[2])))
δ²1 = 1 / max(1e-8, exp(2*pred[2]))
δ²2 = 2 / max(1e-8, exp(2*pred[2])) * (pred[1] - target)^2
lpdf(x,μ,σ) = -log(σ) - log(2π)/2 - 1/2*((x-μ)/σ)^2
lpdf(0, pred[1], pred[2])
lpdf2(x,μ,lσ) = -log(exp(lσ)) - log(2π)/2 - 1/2*((x-μ)/exp(lσ))^2
lpdf2(0, pred[1], pred[2])
n1 = Normal(0, 1)
Distributions.logpdf(n1, 0)
# gradient(lpdf, target, pred[1], pred[2])[2:end]
gradient(lpdf2, target, pred[1], pred[2])[2:end]
Zygote.hessian(lpdf2, target, pred[1], pred[2])
gradient_lpdf(x,pred) = gradient(lpdf2, x, pred[1], pred[2])[3]
hessian_lpdf(x,pred) = gradient(gradient_lpdf, x, pred)[1]
gradient_lpdf(target, pred)
hessian_lpdf(target, pred)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 7830 | using Revise
using EvoTrees
# using CUDA
using CUDA
using BenchmarkTools
# using Flux
# using GeometricFlux
nobs = 500_000
nbins = 64
ncol = 50
h∇ = CUDA.zeros(Float32, 3, nbins, ncol)
∇ = CUDA.rand(Float32, 3, nobs)
x_bin = CuArray(rand(UInt32.(1:nbins), nobs, ncol))
𝑖 = CuArray(UInt32.(1:nobs))
𝑗 = CuArray(UInt32.(1:ncol))
h∇ .= 0
CUDA.@time EvoTrees.update_hist_gpu!(h∇, ∇, x_bin, 𝑖, 𝑗)
@btime EvoTrees.update_hist_gpu!($h∇, $∇, $x_bin, $𝑖, $𝑗)
items = Int(1e6)
hist = zeros(Float32, nbins, ncol)
δ = rand(Float32, items)
idx = rand(1:nbins, items, ncol)
𝑖 = collect(1:items)
𝑗 = collect(1:ncol)
hist_gpu = CuArray(hist)
δ_gpu = CuArray(δ)
idx_gpu = CuArray(idx)
𝑖_gpu = CuArray(𝑖)
𝑗_gpu = CuArray(𝑗)
# CPU
function hist_cpu!(hist, δ, idx, 𝑖, 𝑗)
Threads.@threads for j in 𝑗
@inbounds for i in 𝑖
hist[idx[i], j] += δ[i]
end
end
return
end
function kernel_1!(h::CuDeviceMatrix{T}, x::CuDeviceVector{T}, id, 𝑖, 𝑗) where {T<:AbstractFloat}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
j = threadIdx().y + (blockIdx().y - 1) * blockDim().y
if i <= length(𝑖) && j <= length(𝑗)
@inbounds k = Base._to_linear_index(h, id[𝑖[i], 𝑗[j]], 𝑗[j])
@inbounds CUDA.atomic_add!(pointer(h, k), x[𝑖[i]])
end
return
end
# base approach: thread blocks tiled over the columns first, then the rows, to limit atomic-add collisions
function hist_gpu_1!(h::CuMatrix{T}, x::CuVector{T}, id::CuMatrix{Int}, 𝑖, 𝑗; MAX_THREADS=1024) where {T<:AbstractFloat}
thread_j = min(MAX_THREADS, length(𝑗))
thread_i = min(MAX_THREADS ÷ thread_j, length(𝑖))
threads = (thread_i, thread_j)
blocks = ceil.(Int, (length(𝑖), length(𝑗)) ./ threads)
@cuda blocks=blocks threads=threads kernel_1!(h, x, id, 𝑖, 𝑗)
return
end
@time hist_cpu!(hist, δ, idx, 𝑖, 𝑗)
CUDA.@time hist_gpu_1!(hist_gpu, δ_gpu, idx_gpu, 𝑖_gpu, 𝑗_gpu, MAX_THREADS=1024)
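# sanity check (assumes both buffers started from zero):
# @assert Array(hist_gpu) ≈ hist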
nbins = 32
ncol = 100
items = Int(2e6)
K = 3
hist = zeros(Float32, nbins, 3, ncol)
δ = rand(Float32, items, 3)
idx = rand(1:nbins, items, ncol)
𝑖 = collect(1:items)
𝑗 = collect(1:ncol)
hist_gpu = CuArray(hist)
δ_gpu = CuArray(δ)
idx_gpu = CuArray(idx)
𝑖_gpu = CuArray(𝑖)
𝑗_gpu = CuArray(𝑗)
function kernel_2!(h::CuDeviceArray{T,3}, x::CuDeviceMatrix{T}, id, 𝑖, 𝑗) where {T<:AbstractFloat}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
j = threadIdx().y + (blockIdx().y - 1) * blockDim().y
if i <= length(𝑖) && j <= length(𝑗)
@inbounds k1 = Base._to_linear_index(h, id[𝑖[i], 𝑗[j]], 1, 𝑗[j])
@inbounds CUDA.atomic_add!(pointer(h, k1), x[𝑖[i],1])
@inbounds k2 = Base._to_linear_index(h, id[𝑖[i], 𝑗[j]], 2, 𝑗[j])
@inbounds CUDA.atomic_add!(pointer(h, k2), x[𝑖[i],2])
@inbounds k3 = Base._to_linear_index(h, id[𝑖[i], 𝑗[j]], 3, 𝑗[j])
@inbounds CUDA.atomic_add!(pointer(h, k3), x[𝑖[i],3])
end
return
end
# base approach: thread blocks tiled over the columns first, then the rows, to limit atomic-add collisions
function hist_gpu_2!(h::CuArray{T,3}, x::CuMatrix{T}, id::CuMatrix{Int}, 𝑖, 𝑗; MAX_THREADS=1024) where {T<:AbstractFloat}
thread_j = min(MAX_THREADS, length(𝑗))
thread_i = min(MAX_THREADS ÷ thread_j, length(𝑖))
threads = (thread_i, thread_j)
blocks = ceil.(Int, (length(𝑖), length(𝑗)) ./ threads)
@cuda blocks=blocks threads=threads kernel_2!(h, x, id, 𝑖, 𝑗)
return
end
CUDA.@time hist_gpu_2!(hist_gpu, δ_gpu, idx_gpu, 𝑖_gpu, 𝑗_gpu, MAX_THREADS=1024)
hist_gpu_1 = Array(hist_gpu)
hist_gpu_2 = Array(hist_gpu)
diff1 = hist_gpu_2 - hist_gpu_1
######################################################################################################
# best approach: loop on K indicators
######################################################################################################
function kernel_3!(h::CuDeviceArray{T,3}, x::CuDeviceMatrix{T}, id, 𝑖, 𝑗, K) where {T<:AbstractFloat}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
j = threadIdx().y + (blockIdx().y - 1) * blockDim().y
if i <= length(𝑖) && j <= length(𝑗)
for k in 1:K
@inbounds pt = Base._to_linear_index(h, id[𝑖[i], 𝑗[j]], k, 𝑗[j])
@inbounds CUDA.atomic_add!(pointer(h, pt), x[𝑖[i],k])
end
end
return
end
# base approach: thread blocks tiled over the columns first, then the rows, to limit atomic-add collisions
function hist_gpu_3!(h::CuArray{T,3}, x::CuMatrix{T}, id::CuMatrix{Int}, 𝑖, 𝑗, K; MAX_THREADS=1024) where {T<:AbstractFloat}
thread_j = min(MAX_THREADS, length(𝑗))
thread_i = min(MAX_THREADS ÷ thread_j, length(𝑖))
threads = (thread_i, thread_j)
blocks = ceil.(Int, (length(𝑖), length(𝑗)) ./ threads)
@cuda blocks=blocks threads=threads kernel_3!(h, x, id, 𝑖, 𝑗, K)
return
end
hist_gpu_1 = Array(hist_gpu)
hist_gpu_2 = Array(hist_gpu)
diff2 = hist_gpu_2 - hist_gpu_1
diff2 - diff1
CUDA.@time hist_gpu_3!(hist_gpu, δ_gpu, idx_gpu, 𝑖_gpu, 𝑗_gpu, 3, MAX_THREADS=1024)
######################################################################################################
# 3D kernel - instead of iterating on K - Less efficient than the loop on Ks
######################################################################################################
function kernel_3D!(h::CuDeviceArray{T,3}, x::CuDeviceMatrix{T}, id, 𝑖, 𝑗, K) where {T<:AbstractFloat}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
j = threadIdx().y + (blockIdx().y - 1) * blockDim().y
k = threadIdx().z + (blockIdx().z - 1) * blockDim().z
if i <= length(𝑖) && j <= length(𝑗)
@inbounds pt = Base._to_linear_index(h, id[𝑖[i], 𝑗[j]], k, 𝑗[j])
@inbounds CUDA.atomic_add!(pointer(h, pt), x[𝑖[i],k])
end
return
end
# base approach: thread blocks tiled over the columns first, then the rows, to limit atomic-add collisions
function hist_gpu_3D!(h::CuArray{T,3}, x::CuMatrix{T}, id::CuMatrix{Int}, 𝑖, 𝑗, K; MAX_THREADS=1024) where {T<:AbstractFloat}
thread_k = min(MAX_THREADS, K)
thread_j = min(MAX_THREADS ÷ thread_k, length(𝑗))
thread_i = min(MAX_THREADS ÷ (thread_k * thread_j), length(𝑖))
threads = (thread_i, thread_j, thread_k)
blocks = ceil.(Int, (length(𝑖), length(𝑗), K) ./ threads)
@cuda blocks=blocks threads=threads kernel_3D!(h, x, id, 𝑖, 𝑗, K)
return
end
CUDA.@time hist_gpu_3D!(hist_gpu, δ_gpu, idx_gpu, 𝑖_gpu, 𝑗_gpu, 3, MAX_THREADS=1024)
hist_gpu_1 = Array(hist_gpu)
hist_gpu_2 = Array(hist_gpu)
diff1 = hist_gpu_2 - hist_gpu_1
######################################################################################################
# 3D kernel - instead of iterating on K - No collision approach - single i thread - bad!
######################################################################################################
function kernel_3D2!(h::CuDeviceArray{T,3}, x::CuDeviceMatrix{T}, id, 𝑖, 𝑗, K) where {T<:AbstractFloat}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
j = threadIdx().y + (blockIdx().y - 1) * blockDim().y
k = threadIdx().z + (blockIdx().z - 1) * blockDim().z
if i <= length(𝑖) && j <= length(𝑗)
# @inbounds pt = Base._to_linear_index(h, id[𝑖[i], 𝑗[j]], k, 𝑗[j])
@inbounds h[id[𝑖[i], 𝑗[j]], k, 𝑗[j]] += x[𝑖[i],k]
end
return
end
# non-atomic variant: a single i thread per block, but writes can still collide across blocks (see header note)
function hist_gpu_3D2!(h::CuArray{T,3}, x::CuMatrix{T}, id::CuMatrix{Int}, 𝑖, 𝑗, K; MAX_THREADS=1024) where {T<:AbstractFloat}
thread_k = min(MAX_THREADS, K)
thread_j = min(MAX_THREADS ÷ thread_k, length(𝑗))
thread_i = 1
threads = (thread_i, thread_j, thread_k)
blocks = ceil.(Int, (length(𝑖), length(𝑗), K) ./ threads)
@cuda blocks=blocks threads=threads kernel_3D2!(h, x, id, 𝑖, 𝑗, K)
return
end
CUDA.@time hist_gpu_3D2!(hist_gpu, δ_gpu, idx_gpu, 𝑖_gpu, 𝑗_gpu, 3, MAX_THREADS=1024)
hist_gpu_1 = Array(hist_gpu)
hist_gpu_2 = Array(hist_gpu)
diff1 = hist_gpu_2 - hist_gpu_1
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 878 | using Revise
using EvoTrees
using Base.Threads
L = EvoTrees.Linear
T = Float64
nobs = 1_000_000
y = rand(T, nobs)
pred = rand(T, 1, nobs)
K = 1
δ𝑤 = zeros(T, 2 * K + 1, nobs)
w = ones(T, nobs)
δ𝑤[end, :] .= w
# nthreads: 12
Threads.nthreads()
function update_grads_v1!(::Type{EvoTrees.Linear}, δ𝑤::Matrix, p::Matrix, y::Vector; kwargs...)
@inbounds for i in eachindex(y)
δ𝑤[1, i] = 2 * (p[1, i] - y[i]) * δ𝑤[3, i]
δ𝑤[2, i] = 2 * δ𝑤[3, i]
end
end
# 958.670 μs (0 allocations: 0 bytes)
@btime update_grads_v1!(L, δ𝑤, pred, y)
function update_grads_v2!(::Type{EvoTrees.Linear}, δ𝑤::Matrix, p::Matrix, y::Vector; kwargs...)
@threads for i in eachindex(y)
@inbounds δ𝑤[1, i] = 2 * (p[1, i] - y[i]) * δ𝑤[3, i]
@inbounds δ𝑤[2, i] = 2 * δ𝑤[3, i]
end
end
# 958.670 μs (0 allocations: 0 bytes)
@btime update_grads_v2!(L, δ𝑤, pred, y)
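# note: with only two fused ops per element this kernel is memory-bandwidth
# bound, so @threads may yield little speedup over the serial loop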
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2037 | using EvoTrees
module LearnAPI
abstract type Config end
abstract type Learner end
abstract type Model end
function fit(config::Config; kwargs...)
return nothing
end
function fit(config::Config, data; kwargs...)
return nothing
end
function init(config::Config, data; kwargs...)
return nothing
end
# function fit!(learner::Learner)
# return nothing
# end
function predict(model::Model, x)
return x
end
function predict!(p, model::Model, x)
return nothing
end
function isiterative(m) end
end #module
struct EvoLearner
params
end
# 1-arg fit: all supplemental info is passed through kwargs; risks fragmenting naming conventions and is hard to follow
m = LearnAPI.fit(config::Config; kwargs)
m = LearnAPI.fit(config::EvoTrees.EvoTypes; x_train=xt, y_train=yt)
m = LearnAPI.fit(config::EvoTrees.EvoTypes; x_train=xt, y_train=yt, x_eval=xe, y_eval=ye)
# 2-arg fit: makes the training data an explicit argument; may facilitate dispatch/specialization on the various supported data types
m = LearnAPI.fit(config::Config, data; kwargs)
m = LearnAPI.fit(config::EvoTrees.EvoTypes, (x_train, y_train))
m = LearnAPI.fit(config::EvoTrees.EvoTypes, (x_train, y_train); x_eval=xe, y_eval=ye)
m = LearnAPI.fit(config::EvoTrees.EvoTypes, df::DataFrame)
# Iterative models
import .LearnAPI: isiterative
LearnAPI.isiterative(m::EvoTree) = true
# 2-arg model initialization
# Here an EvoTreeLearner is returned: a comprehensive struct that includes the config, the model, and cache/state
m = LearnAPI.init(config::Config, data::DataFrame; kwargs)
m = LearnAPI.init(config::EvoTrees.EvoTypes, df::DataFrame; x_eval=xe, y_eval=ye)
LearnAPI.fit!(m::EvoTree)
LearnAPI.fit!(m::EvoTree, data)
# LearnAPI.fit!(m, config::EvoTrees.EvoTypes; kwargs)
LearnAPI.predict(m::EvoTrees.EvoTypes, x)
config = EvoTreeRegressor()
# m, cache = LearnAPI.init()
# should it be possible for a model to specify feature treatment upfront at the Config level?
# Or rather have those passed at the fitted level?
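# Hypothetical minimal implementation of the 2-args `fit` flavour above, using
# a trivial mean predictor so the call sequence is concrete. `MeanConfig` and
# `MeanModel` are illustrative assumptions, not EvoTrees or LearnAPI types.
struct MeanConfig <: LearnAPI.Config end
struct MeanModel <: LearnAPI.Model
    μ::Float64
end
LearnAPI.fit(::MeanConfig, data::Tuple; kwargs...) = MeanModel(sum(data[2]) / length(data[2]))
LearnAPI.predict(m::MeanModel, x) = fill(m.μ, size(x, 1))
# usage:
# m = LearnAPI.fit(MeanConfig(), (rand(10, 2), rand(10)))
# LearnAPI.predict(m, rand(5, 2))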
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 4836 | """
mse(p, y)
mse(p, y, w)
Mean squared error evaluation metric.
# Arguments
- `p`: predicted value.
- `y`: observed target variable.
- `w`: vector of weights.
"""
function mse(p::Matrix{T}, y::Vector{T}) where {T}
metric = zero(eltype(p))
@turbo for i in eachindex(y)
metric += (y[i] - p[i])^2
end
return metric / length(p)
end
function mse(p::Matrix{T}, y::Vector{T}, w::Vector{T}) where {T}
metric = zero(eltype(p))
@turbo for i in eachindex(y)
metric += (y[i] - p[i])^2 * w[i]
end
return metric / sum(w)
end
mse(p, y, w::Nothing) = mse(p, y)
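# note: with unit weights the two forms agree, e.g.
# p = randn(1, 10); y = rand(10)
# mse(p, y) ≈ mse(p, y, ones(10))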
"""
mae(p, y)
mae(p, y, w)
Mean absolute error evaluation metric.
# Arguments
- `p`: predicted value.
- `y`: observed target variable.
- `w`: vector of weights.
"""
function mae(p, y)
metric = zero(eltype(p))
@turbo for i in eachindex(y)
metric += abs(y[i] - p[i])
end
return metric / length(p)
end
function mae(p, y, w)
metric = zero(eltype(p))
@turbo for i in eachindex(y)
metric += abs(y[i] - p[i]) * w[i]
end
return metric / sum(w)
end
mae(p, y, w::Nothing) = mae(p, y)
"""
logloss(p, y)
logloss(p, y, w)
Logloss evaluation metric.
-(y * log(p) + (1 - y) * log(1 - p))
# Arguments
- `p`: predicted value on the linear (link) scale; a sigmoid projection is applied internally.
- `y`: observed target variable.
- `w`: vector of weights.
"""
function logloss(p, y)
ϵ = eps(eltype(y)(1e-7))
metric = zero(eltype(p))
_p = sigmoid.(p)
@turbo for i in eachindex(y)
metric += -(y[i] * log(_p[i] + ϵ) + (1 - y[i]) * log(1 - _p[i] + ϵ))
end
return metric / length(p)
end
function logloss(p, y, w)
ϵ = eps(eltype(y)(1e-7))
metric = zero(eltype(p))
_p = sigmoid.(p)
@turbo for i in eachindex(y)
metric += -(y[i] * log(_p[i] + ϵ) + (1 - y[i]) * log(1 - _p[i] + ϵ)) * w[i]
end
return metric / sum(w)
end
logloss(p, y, w::Nothing) = logloss(p, y)
"""
poisson_deviance(p, y)
poisson_deviance(p, y, w)
Poisson deviance evaluation metric.
`𝐷 = 2 * (y * log(y/p) + p - y)`
# Arguments
- `p`: predicted value on the log (link) scale; an exponential projection is applied internally.
- `y`: observed target variable.
- `w`: vector of weights.
"""
function poisson_deviance(p, y)
    ϵ = eps(eltype(p)(1e-7))
    metric = zero(eltype(p))
    _p = exp.(p)
    @turbo for i in eachindex(y)
        metric += 2 * (y[i] * log(y[i] / _p[i] + ϵ) + _p[i] - y[i])
    end
    return metric / length(p)
end
function poisson_deviance(p, y, w)
    ϵ = eps(eltype(p)(1e-7))
    metric = zero(eltype(p))
    _p = exp.(p)
    @turbo for i in eachindex(y)
        metric += 2 * (y[i] * log(y[i] / _p[i] + ϵ) + _p[i] - y[i]) * w[i]
    end
    return metric / sum(w)
end
poisson_deviance(p, y, w::Nothing) = poisson_deviance(p, y)
"""
gamma_deviance(p, y)
gamma_deviance(p, y, w)
Gamma deviance evaluation metric.
`𝐷 = 2 * (log(μ/y) + y/μ - 1)`
# Arguments
- `p`: predicted value on the log (link) scale; an exponential projection is applied internally.
- `y`: observed target variable.
- `w`: vector of weights.
"""
function gamma_deviance(p, y)
    metric = zero(eltype(p))
    _p = exp.(p)
    @turbo for i in eachindex(y)
        metric += 2 * (log(_p[i] / y[i]) + y[i] / _p[i] - 1)
    end
    return metric / length(p)
end
function gamma_deviance(p, y, w)
    metric = zero(eltype(p))
    _p = exp.(p)
    @turbo for i in eachindex(y)
        metric += 2 * (log(_p[i] / y[i]) + y[i] / _p[i] - 1) * w[i]
    end
    return metric / sum(w)
end
gamma_deviance(p, y, w::Nothing) = gamma_deviance(p, y)
"""
tweedie_deviance(p, y)
tweedie_deviance(p, y, w)
Tweedie deviance evaluation metric. Fixed rho (ρ) of 1.5.
𝐷 = 2 * (y^(2-ρ)/((1-ρ)(2-ρ)) - y*μ^(1-ρ)/(1-ρ) + μ^(2-ρ)/(2-ρ))
# Arguments
- `p`: predicted value. Assumes that p is on a projected basis (ie. in the `[0-Inf]` range).
- `y`: observed target variable.
- `w`: vector of weights.
"""
function tweedie_deviance(p, y)
    rho = eltype(p)(1.5)
    metric = zero(eltype(p))
    _p = exp.(p)
    @turbo for i in eachindex(y)
        metric += 2 * (y[i]^(2 - rho) / (1 - rho) / (2 - rho) - y[i] * _p[i]^(1 - rho) / (1 - rho) + _p[i]^(2 - rho) / (2 - rho))
    end
    return metric / length(p)
end
function tweedie_deviance(p, y, w)
    rho = eltype(p)(1.5)
    metric = zero(eltype(p))
    _p = exp.(p)
    @turbo for i in eachindex(y)
        metric += 2 * (y[i]^(2 - rho) / (1 - rho) / (2 - rho) - y[i] * _p[i]^(1 - rho) / (1 - rho) + _p[i]^(2 - rho) / (2 - rho)) * w[i]
    end
    return metric / sum(w)
end
end
tweedie_deviance(p, y, w::Nothing) = tweedie_deviance(p, y)
const metric_dict = Dict(
:mse => mse,
:mae => mae,
:logloss => logloss,
:poisson_deviance => poisson_deviance,
:gamma_deviance => gamma_deviance,
:tweedie_deviance => tweedie_deviance
) | EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
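# usage sketch (illustrative; assumes predictions are kept on the link scale,
# as the functions above apply sigmoid/exp internally):
# p = randn(1, 100); y = rand(100); w = rand(100)
# metric_dict[:logloss](p, y, w)    # weighted logloss
# metric_dict[:mse](p, y, nothing)  # `nothing` falls back to the unweighted form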
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 5449 | using Revise
using BenchmarkTools
using Statistics
using StatsBase: sample, quantile
using Distributions
using Random
using Plots
using EvoTrees
using EvoTrees: sigmoid, logit
# using ProfileView
# prepare a dataset
Random.seed!(12)
features = rand(10_000) .* 5
X = reshape(features, (size(features)[1], 1))
Y = sin.(features) .* 0.5 .+ 0.5
Y = logit(Y) + randn(size(Y))
Y = sigmoid(Y)
𝑖 = collect(1:size(X, 1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
x_train, x_eval = X[𝑖_train, :], X[𝑖_eval, :]
y_train, y_eval = Y[𝑖_train], Y[𝑖_eval]
# linear
params1 = EvoTreeRegressor(T=Float64,
loss=:linear, metric=:mae,
nrounds=20, nbins=64,
lambda=0.1, gamma=0.1, eta=0.05,
max_depth=6, min_weight=1.0,
rowsample=0.5, colsample=1.0,
rng=123)
@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=5);
@time pred_train_linear = predict(model, x_train);
mean(abs.(pred_train_linear .- y_train))
# offset
@time model_offset = fit_evotree(params1; x_train, y_train, x_eval, y_eval, offset_train=pred_train_linear, print_every_n=25);
@time pred_train_linear_offset = pred_train_linear .+ predict(model_offset, x_train);
mean(abs.(pred_train_linear_offset .- y_train))
x_perm = sortperm(x_train[:, 1])
plot(x_train, y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="")
plot!(x_train[:, 1][x_perm], pred_train_linear[x_perm], color="navy", linewidth=1.5, label="Linear")
plot!(x_train[:, 1][x_perm], pred_train_linear_offset[x_perm], color="lightblue", linewidth=1.5, label="Linear-Offset")
###############################
## gaussian - cpu
###############################
params1 = EvoTreeGaussian(
loss=:gaussian, metric=:gaussian,
nrounds=20, nbins=64,
lambda=0.1, gamma=0.1, eta=0.05,
max_depth=6, min_weight=1.0,
rowsample=1.0, colsample=1.0, rng=123)
@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=5);
@time pred_train = EvoTrees.predict(model, x_train);
@time pred_eval = EvoTrees.predict(model, x_eval);
@time model_res = fit_evotree(params1; x_train, y_train, offset_train=copy(pred_train), x_eval, y_eval, offset_eval=copy(pred_eval), print_every_n=5);
@time pred_train_res = EvoTrees.predict(model_res, x_train);
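# gaussian models predict (μ, σ); residual boosting adds on the link scale,
# so σ is stacked below as exp(log σ₁ + log σ₂) while μ adds directly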
pred_train_stack = copy(pred_train)
pred_train_stack[:, 2] .= log.(pred_train_stack[:, 2])
pred_train_res_stack = copy(pred_train_res)
pred_train_res_stack[:, 2] .= log.(pred_train_res_stack[:, 2])
pred_train_tot = pred_train_stack + pred_train_res_stack
pred_train_tot[:, 2] .= exp.(pred_train_tot[:, 2])
x_perm = sortperm(x_train[:, 1])
plot(x_train[:, 1], y_train, ms=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="")
plot!(x_train[:, 1][x_perm], pred_train[x_perm, 1], color="navy", linewidth=1.5, label="mu")
plot!(x_train[:, 1][x_perm], pred_train_res[x_perm, 1], color="blue", linewidth=1.5, label="mu-res")
plot!(x_train[:, 1][x_perm], pred_train_tot[x_perm, 1], color="lightblue", linewidth=1.5, label="mu-tot")
plot!(x_train[:, 1][x_perm], pred_train[x_perm, 2], color="darkred", linewidth=1.5, label="sigma")
plot!(x_train[:, 1][x_perm], pred_train_res[x_perm, 2], color="red", linewidth=1.5, label="sigma-res")
plot!(x_train[:, 1][x_perm], pred_train_tot[x_perm, 2], color="pink", linewidth=1.5, label="sigma-tot")
###############################
## gaussian - gpu
###############################
params1 = EvoTreeGaussian(
loss=:gaussian, metric=:gaussian,
nrounds=20, nbins=64,
lambda=0.1, gamma=0.1, eta=0.05,
max_depth=6, min_weight=1.0,
rowsample=1.0, colsample=1.0, rng=123,
device="gpu")
@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=5);
@time pred_train = EvoTrees.predict(model, x_train);
@time pred_eval = EvoTrees.predict(model, x_eval);
@time model_res = fit_evotree(params1; x_train, y_train, offset_train=copy(pred_train), x_eval, y_eval, offset_eval=copy(pred_eval), print_every_n=5);
@time pred_train_res = EvoTrees.predict(model_res, x_train);
pred_train_stack = copy(pred_train)
pred_train_stack[:, 2] .= log.(pred_train_stack[:, 2])
pred_train_res_stack = copy(pred_train_res)
pred_train_res_stack[:, 2] .= log.(pred_train_res_stack[:, 2])
pred_train_tot = pred_train_stack + pred_train_res_stack
pred_train_tot[:, 2] .= exp.(pred_train_tot[:, 2])
x_perm = sortperm(x_train[:, 1])
plot(x_train[:, 1], y_train, ms=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="")
plot!(x_train[:, 1][x_perm], pred_train[x_perm, 1], color="darkblue", linewidth=1.5, label="mu")
plot!(x_train[:, 1][x_perm], pred_train_res[x_perm, 1], color="blue", linewidth=1.5, label="mu-res")
plot!(x_train[:, 1][x_perm], pred_train_tot[x_perm, 1], color="lightblue", linewidth=1.5, label="mu-tot")
plot!(x_train[:, 1][x_perm], pred_train[x_perm, 2], color="darkred", linewidth=1.5, label="sigma")
plot!(x_train[:, 1][x_perm], pred_train_res[x_perm, 2], color="red", linewidth=1.5, label="sigma-res")
plot!(x_train[:, 1][x_perm], pred_train_tot[x_perm, 2], color="pink", linewidth=1.5, label="sigma-tot")
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2206 | using Statistics
using StatsBase: sample
using EvoTrees
using BenchmarkTools
using Plots
# prepare a dataset
features = rand(Int(1.25e5), 100)
# features = rand(100, 10)
X = features
Y = rand(size(X, 1))
𝑖 = collect(1:size(X, 1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
x_train, x_eval = X[𝑖_train, :], X[𝑖_eval, :]
y_train, y_eval = Y[𝑖_train], Y[𝑖_eval]
#############################
# CPU - linear
#############################
params1 = EvoTreeRegressor(
T = Float32,
loss = :linear,
nrounds = 10,
lambda = 1.0,
gamma = 0.1,
eta = 0.2,
max_depth = 5,
min_weight = 1.0,
rowsample = 0.5,
colsample = 0.5,
nbins = 32,
device = "cpu",
)
# for 100k 10 rounds: 410.477 ms (44032 allocations: 182.68 MiB)
# for 100k 100 rounds: 2.177 s (404031 allocations: 626.45 MiB)
# for 1.25e6 no eval: 6.244 s (73955 allocations: 2.18 GiB)
# for 1.25e6 mse with eval data: 6.345 s (74009 allocations: 2.18 GiB)
@time model = fit_evotree(params1; x_train, y_train);
plot(model, 3)
params1 = EvoTreeRegressor(
T = Float32,
loss = :linear,
nrounds = 10,
lambda = 1.0,
gamma = 0.5,
eta = 0.2,
max_depth = 5,
min_weight = 1.0,
rowsample = 0.5,
colsample = 0.5,
nbins = 32,
device = "gpu",
)
# for 100k 10 rounds: 410.477 ms (44032 allocations: 182.68 MiB)
# for 100k 100 rounds: 2.177 s (404031 allocations: 626.45 MiB)
# for 1.25e6 no eval: 6.244 s (73955 allocations: 2.18 GiB)
# for 1.25e6 mse with eval data: 6.345 s (74009 allocations: 2.18 GiB)
@time model = fit_evotree(params1; x_train, y_train);
#############################
# CPU - Gaussian
#############################
params1 = EvoTreeGaussian(
T = Float64,
loss = :gaussian,
metric = :none,
nrounds = 10,
λ = 1.0,
γ = 0.1,
η = 0.1,
max_depth = 5,
min_weight = 1.0,
rowsample = 0.5,
colsample = 0.5,
nbins = 64,
device = "gpu",
)
@time model = fit_evotree(params1; x_train, y_train);
EvoTrees.save(model, "data/model_gaussian_gpu.bson")
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 8208 | using Revise
using Statistics
using StatsBase: sample
using EvoTrees
using BenchmarkTools
# prepare a dataset
features = rand(Int(1.25e6), 11)
# features = rand(100, 10)
X = features
Y = rand(size(X, 1))
𝑖 = collect(1:size(X, 1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
x_train, x_eval = X[𝑖_train, :], X[𝑖_eval, :]
y_train, y_eval = Y[𝑖_train], Y[𝑖_eval]
#############################
# CPU - linear
#############################
params1 = EvoTreeRegressor(T=Float32,
loss=:linear, metric=:mse,
nrounds=100,
lambda=1.0, gamma=0, eta=0.1,
max_depth=6, min_weight=1.0,
rowsample=0.5, colsample=0.5, nbins=64)
# asus laptopt: for 1.25e6 no eval: 9.650007 seconds (893.53 k allocations: 2.391 GiB, 5.52% gc time)
@time model = fit_evotree(params1; x_train, y_train);
@time model = fit_evotree(params1; x_train, y_train, metric=:mse, x_eval, y_eval, print_every_n=20, verbosity=1);
@btime model = fit_evotree(params1; x_train, y_train);
@time pred_train = predict(model, x_train);
@btime pred_train = predict(model, x_train);
gain = importance(model)
@time model, logger = fit_evotree(params1; x_train, y_train, metric=:mse, x_eval, y_eval, early_stopping_rounds=10, print_every_n=10, return_logger=true);
plot(logger[:metrics])
@time model, cache = EvoTrees.init_evotree(params1; x_train, y_train);
@time EvoTrees.grow_evotree!(model, cache, params1);
#############################
# CPU - Logistic
#############################
params1 = EvoTreeRegressor(T=Float32,
loss=:logistic, metric=:logloss,
nrounds=100,
lambda=1.0, gamma=0.0, eta=0.1,
max_depth=6, min_weight=1.0,
rowsample=0.5, colsample=0.5, nbins=64)
@time model = fit_evotree(params1; x_train, y_train);
@time pred = predict(model, x_train);
# @code_warntype pred = predict(model, x_train)
#############################
# CPU - Gaussian
#############################
params1 = EvoTreeGaussian(T=Float32,
loss=:gaussian, metric=:gaussian,
nrounds=100,
lambda=1.0, gamma=0, eta=0.1,
max_depth=6, min_weight=1.0,
rowsample=0.5, colsample=0.5, nbins=32)
# Asus laptop: 19.396380 seconds (894.90 k allocations: 3.810 GiB, 3.05% gc time)
@time model = fit_evotree(params1; x_train, y_train);
# Asus laptop: 1.667185 seconds (2.41 k allocations: 1.658 GiB)
@time model, cache = EvoTrees.init_evotree(params1, x_train, y_train);
################################
# GPU - Linear
################################
# train model
params1 = EvoTreeRegressor(T=Float32,
loss=:linear, metric=:mse,
nrounds=100,
lambda=1.0, gamma=0, eta=0.1,
max_depth=6, min_weight=1.0,
rowsample=0.5, colsample=0.5, nbins=64,
device="gpu")
# Asus laptop: 10.015568 seconds (13.80 M allocations: 1.844 GiB, 4.00% gc time)
@time model = EvoTrees.fit_evotree(params1; x_train, y_train);
@btime model = EvoTrees.fit_evotree(params1; x_train, y_train);
@time model, cache = EvoTrees.init_evotree_gpu(params1; x_train, y_train);
@time EvoTrees.grow_evotree!(model, cache, params1);
using MLJBase
mach1 = machine(EvoTreeRegressor(loss=:linear, device="gpu", max_depth=5, eta=0.01, nrounds=10), x_train, y_train, cache=true)
mach2 = machine(EvoTreeRegressor(loss=:linear, device="gpu", max_depth=5, eta=0.01, nrounds=10), x_train, y_train, cache=false)
mach3 = machine(EvoTreeRegressor(loss=:linear, device="gpu", max_depth=5, eta=0.01, nrounds=10), x_train, y_train, cache=false)
fit!(mach1)
# x_train_32 = Float32.(x_train)
@time pred_train = EvoTrees.predict(model, x_train);
@btime pred_train = EvoTrees.predict($model, $x_train);
mean(pred_train)
################################
# GPU - Logistic
################################
# train model
params1 = EvoTreeRegressor(T=Float32,
loss=:logistic, metric=:logloss,
nrounds=100,
lambda=1.0, gamma=0, eta=0.1,
max_depth=6, min_weight=1.0,
rowsample=0.5, colsample=0.5, nbins=64,
device="gpu")
@time model = fit_evotree(params1; x_train, y_train);
@time pred_train = predict(model, x_train)
################################
# GPU - Gaussian
################################
params1 = EvoTreeGaussian(T=Float32,
loss=:gaussian,
nrounds=100,
lambda=1.0, gamma=0, eta=0.1,
max_depth=6, min_weight=1.0,
rowsample=0.5, colsample=0.5, nbins=32,
device="gpu")
# Asus laptop: 14.304369 seconds (24.81 M allocations: 2.011 GiB, 1.90% gc time)
@time model = EvoTrees.fit_evotree(params1; x_train, y_train);
# Auss laptop: 1.888472 seconds (8.40 k allocations: 1.613 GiB, 14.86% gc time)
@time model, cache = EvoTrees.init_evotree(params1; x_train, y_train);
############################
# xgboost
############################
using XGBoost
num_round = 100
param = ["max_depth" => 5,
"eta" => 0.05,
"objective" => "reg:squarederror",
"print_every_n" => 5,
"subsample" => 0.5,
"colsample_bytree" => 0.5,
"tree_method" => "hist",
"nthread" => 16,
"max_bin" => 32]
metrics = ["rmse"]
@time xgboost(x_train, num_round, label=y_train, param=param, metrics=metrics, silent=1);
@time dtrain = DMatrix(x_train, label=y_train)
@time model_xgb = xgboost(dtrain, num_round, param=param, silent=1);
@btime model_xgb = xgboost(dtrain, num_round, param=param, silent=1);
@time pred_train = XGBoost.predict(model_xgb, x_train)
@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=9999, early_stopping_rounds=9999);
@btime model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=9999, early_stopping_rounds=9999);
@time model = fit_evotree(params1; x_train, y_train, early_stopping_rounds=10);
@time model = fit_evotree(params1; x_train, y_train, print_every_n=2);
# @time model = grow_gbtree(X_train, Y_train, params1, X_eval = X_eval, Y_eval = Y_eval, print_every_n = 5);
# @btime model = grow_gbtree($X_train, $Y_train, $params1, X_eval = $X_eval, Y_eval = $Y_eval);
@time pred_train = predict(model, x_train)
@code_warntype predict(model, x_train)
@time pred = zeros(SVector{1,Float64}, size(x_train, 1))
@time EvoTrees.predict!(pred, model.trees[2], x_train)
@time predict(model, x_train)
@btime pred_train = predict($model, $x_train)
mean(abs.(pred_train .- y_train))
# logistic
params1 = EvoTreeRegressor(
loss=:logistic, metric=:logloss,
nrounds=100,
λ=0.0f0, γ=0.0f0, η=0.1f0,
max_depth=6, min_weight=1.0f0,
rowsample=0.5f0, colsample=0.5f0, α=0.5f0, nbins=32)
@time model = fit_evotree(params1; x_train, y_train);
@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=10)
@time pred_train = predict(model, x_train)
# Quantile
params1 = EvoTreeRegressor(
loss=:quantile, metric=:quantile, α=0.80f0,
nrounds=100,
λ=0.1f0, γ=0.0f0, η=0.1f0,
max_depth=6, min_weight=1.0f0,
rowsample=0.5f0, colsample=0.5f0, nbins=32)
@time model = fit_evotree(params1; x_train, y_train);
@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=10)
@time pred_train = predict(model, x_train)
# gaussian
params1 = EvoTreeGaussian(
loss=:gaussian, metric=:gaussian,
nrounds=100, α=0.5f0,
λ=0.0f0, γ=0.0f0, η=0.1f0,
max_depth=6, min_weight=10.0f0,
rowsample=0.5f0, colsample=0.5f0, nbins=32)
@time model = fit_evotree(params1; x_train, y_train);
@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=10)
@time pred_train = predict(model, x_train)
# softmax
params1 = EvoTreeClassifier(
loss=:softmax, metric=:mlogloss,
nrounds=100, α=0.5f0,
λ=0.0f0, γ=0.0f0, η=0.1f0,
max_depth=6, min_weight=10.0f0,
rowsample=0.5f0, colsample=0.5f0, nbins=32)
y_train_int = UInt32.(round.(y_train * 2) .+ 1)
y_eval_int = UInt32.(round.(y_eval * 2) .+ 1)
y_train_int = Int.(y_train_int)
@time model = fit_evotree(params1; x_train, y_train=y_train_int, print_every_n=10);
@time model = fit_evotree(params1; x_train, y_train=y_train_int, x_eval, y_eval=y_eval_int, print_every_n=10)
@time pred_train = predict(model, x_train)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2590 | using Revise
using BenchmarkTools
using Statistics
using StatsBase: sample, quantile
using Distributions
using Random
using Plots
using EvoTrees
using DataFrames
using CategoricalArrays
using EvoTrees: predict, sigmoid, logit
# using ProfileView
# prepare a dataset
nobs = 10_000
Random.seed!(123)
x_num = rand(nobs) .* 5
lvls = ["A", "B", "C"]
x_cat = categorical(rand(lvls, nobs), levels=lvls, ordered=false)
levels(x_cat)
isordered(x_cat)
y = sin.(x_num) .* 0.5 .+ 0.5
y = logit(y) .+ 1.0 .* (x_cat .== "B") .- 1.0 .* (x_cat .== "C") + randn(nobs)
y = sigmoid(y)
is = collect(1:nobs)
dtot = DataFrame(x_num=x_num, x_cat=x_cat, y=y)
# train-eval split
is = sample(is, length(is), replace=false)
train_size = 0.8
i_train = is[1:floor(Int, train_size * size(is, 1))]
i_eval = is[floor(Int, train_size * size(is, 1))+1:end]
dtrain = dtot[i_train, :]
deval = dtot[i_eval, :]
# linear
params1 = EvoTreeRegressor(
T=Float32,
loss=:linear,
nrounds=200,
nbins=64,
lambda=0.1,
gamma=0.05,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
@time model = fit_evotree(
params1,
dtrain;
fnames=["x_cat", "x_num"],
target_name="y",
deval,
metric=:mse,
print_every_n=25,
early_stopping_rounds=20,
verbosity=0
);
pred = model(dtrain);
# @btime model = fit_evotree(
# params1,
# dtrain;
# fnames="x_num",
# target_name="y",
# verbosity=0
# );
# laptop: 51.651 ms (237548 allocations: 23.94 MiB)
# plot(logger[:metrics])
# @time pred_train_linear = predict(model, x_train);
# @time pred_eval_linear = predict(model, x_eval)
# mean((pred_train_linear .- y_train) .^ 2)
# mean((pred_eval_linear .- y_eval) .^ 2)
plot(
dtrain.x_num,
dtrain.y,
msize=0.5,
mcolor="darkgray",
mswidth=0,
background_color=RGB(1, 1, 1),
seriestype=:scatter,
xaxis=("feature"),
yaxis=("target"),
legend=true,
label="",
)
dinfer = dtrain[dtrain.x_cat.=="A", :]
pred = model(dinfer)
x_perm = sortperm(dinfer.x_num)
plot!(
dinfer.x_num[x_perm],
pred[x_perm],
color="lightblue",
linewidth=1.5,
label="Linear - A",
)
dinfer = dtrain[dtrain.x_cat.=="B", :]
pred = model(dinfer);
x_perm = sortperm(dinfer.x_num)
plot!(
dinfer.x_num[x_perm],
pred[x_perm],
color="blue",
linewidth=1.5,
label="Linear - B",
)
dinfer = dtrain[dtrain.x_cat.=="C", :]
pred = model(dinfer);
x_perm = sortperm(dinfer.x_num)
plot!(
dinfer.x_num[x_perm],
pred[x_perm],
color="navy",
linewidth=1.5,
label="Linear - C",
)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2689 | using Revise
using BenchmarkTools
using Statistics
using StatsBase: sample, quantile
using Distributions
using Random
using Plots
using CUDA
using EvoTrees
using DataFrames
using CategoricalArrays
using EvoTrees: predict, sigmoid, logit
# using ProfileView
device = "gpu"
# prepare a dataset
nobs = 10_000
Random.seed!(123)
x_num = rand(nobs) .* 5
x_cat = categorical(rand(["A", "B", "C"], nobs))
y = sin.(x_num) .* 0.5 .+ 0.5
y = logit(y) .+ 1.0 .* (x_cat .== "B") .- 1.0 .* (x_cat .== "C") + randn(nobs)
y = sigmoid(y)
is = collect(1:nobs)
dtot = DataFrame(x_num=x_num, x_cat=x_cat, y=y)
# train-eval split
is = sample(is, length(is), replace=false)
train_size = 0.8
i_train = is[1:floor(Int, train_size * size(is, 1))]
i_eval = is[floor(Int, train_size * size(is, 1))+1:end]
dtrain = dtot[i_train, :]
deval = dtot[i_eval, :]
# linear
params1 = EvoTreeRegressor(
T=Float32,
loss=:linear,
nrounds=200,
nbins=64,
lambda=0.1,
gamma=0.05,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
@time model = EvoTrees.fit_evotree(
params1,
dtrain;
fnames=["x_num", "x_cat"],
target_name="y",
deval,
device,
metric=:mse,
print_every_n=25,
early_stopping_rounds=20,
verbosity=0
);
@time pred = model(dtrain);
@time pred = model(dtrain; device);
# @btime model = EvoTrees.fit_evotree_df(
# params1;
# dtrain,
# fnames_num="x_num",
# target_name="y",
# # print_every_n = 25,
# # early_stopping_rounds = 20,
# verbosity=0
# );
# laptop: 51.651 ms (237548 allocations: 23.94 MiB)
# plot(logger[:metrics])
# @time pred_train_linear = predict(model, x_train);
# @time pred_eval_linear = predict(model, x_eval)
# mean((pred_train_linear .- y_train) .^ 2)
# mean((pred_eval_linear .- y_eval) .^ 2)
plot(
dtrain.x_num,
dtrain.y,
msize=0.5,
mcolor="darkgray",
mswidth=0,
background_color=RGB(1, 1, 1),
seriestype=:scatter,
xaxis=("feature"),
yaxis=("target"),
legend=true,
label="",
)
dinfer = dtrain[dtrain.x_cat.=="A", :]
pred = model(dinfer)
x_perm = sortperm(dinfer.x_num)
plot!(
dinfer.x_num[x_perm],
pred[x_perm],
color="lightblue",
linewidth=1.5,
label="Linear - A",
)
dinfer = dtrain[dtrain.x_cat.=="B", :]
pred = model(dinfer);
x_perm = sortperm(dinfer.x_num)
plot!(
dinfer.x_num[x_perm],
pred[x_perm],
color="blue",
linewidth=1.5,
label="Linear - B",
)
dinfer = dtrain[dtrain.x_cat.=="C", :]
pred = model(dinfer);
x_perm = sortperm(dinfer.x_num)
plot!(
dinfer.x_num[x_perm],
pred[x_perm],
color="navy",
linewidth=1.5,
label="Linear - C",
)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 13022 | using Revise
using BenchmarkTools
using Statistics
using StatsBase: sample, quantile
using Distributions
using Random
using Plots
using EvoTrees
using EvoTrees: predict, sigmoid, logit
# using ProfileView
tree_type = "binary"
# prepare a dataset
Random.seed!(123)
features = rand(10_000) .* 5
X = reshape(features, (size(features)[1], 1))
Y = sin.(features) .* 0.5 .+ 0.5
Y = logit(Y) + randn(size(Y))
Y = sigmoid(Y)
is = collect(1:size(X, 1))
# train-eval split
is = sample(is, length(is), replace=false)
train_size = 0.8
i_train = is[1:floor(Int, train_size * size(is, 1))]
i_eval = is[floor(Int, train_size * size(is, 1))+1:end]
x_train, x_eval = X[i_train, :], X[i_eval, :]
y_train, y_eval = Y[i_train], Y[i_eval]
# linear
params1 = EvoTreeRegressor(;
loss=:mse,
alpha=1,
nrounds=200,
nbins=64,
lambda=0.01,
gamma=0.1,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type,
rng=122
)
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:mse,
print_every_n=25,
early_stopping_rounds=20
);
# plot(model, 2)
# laptop: 51.651 ms (237548 allocations: 23.94 MiB)
# @btime model = fit_evotree(params1; x_train, y_train, x_eval = x_eval, y_eval = y_eval, metric = :mse, print_every_n = 999, verbosity=0);
# Profile.clear() # in case we have any previous profiling data
# @profile fit_evotree(params1, X_train, Y_train, X_eval = X_eval, Y_eval = Y_eval, print_every_n = 25)
# ProfileView.view()
model, logger = fit_evotree(
params1;
x_train,
y_train,
metric=:mse,
x_eval,
y_eval,
early_stopping_rounds=20,
print_every_n=10,
return_logger=true
);
# plot(logger[:metrics])
# @btime model = grow_gbtree($X_train, $Y_train, $params1, X_eval = $X_eval, Y_eval = $Y_eval, print_every_n = 25, metric=:mae)
@time pred_train_linear = model(x_train);
@time pred_eval_linear = model(x_eval)
mean((pred_train_linear .- y_train) .^ 2)
mean((pred_eval_linear .- y_eval) .^ 2)
# linear weighted
params1 = EvoTreeRegressor(;
T=Float64,
loss=:linear,
nrounds=500,
nbins=64,
lambda=0.1,
gamma=0.1,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type,
rng=123
)
# W_train = ones(eltype(Y_train), size(Y_train)) .* 5
w_train = rand(eltype(y_train), size(y_train)) .+ 0
@time model = fit_evotree(
params1;
x_train,
y_train,
w_train,
x_eval,
y_eval,
print_every_n=25,
early_stopping_rounds=50,
metric=:mse
);
# 67.159 ms (77252 allocations: 28.06 MiB)
# @time model = fit_evotree(params1, X_train, Y_train, X_eval = X_eval, Y_eval = Y_eval, print_every_n = 999);
# @btime model = fit_evotree($params1, $X_train, $Y_train, X_eval = $X_eval, Y_eval = $Y_eval);
# Profile.clear() # in case we have any previous profiling data
# @profile fit_evotree(params1, X_train, Y_train, X_eval = X_eval, Y_eval = Y_eval, print_every_n = 25)
# ProfileView.view()
# @btime model = grow_gbtree($X_train, $Y_train, $params1, X_eval = $X_eval, Y_eval = $Y_eval, print_every_n = 25, metric=:mae)
@time pred_train_linear_w = model(x_train);
@time pred_eval_linear_w = model(x_eval)
mean(abs.(pred_train_linear_w .- y_train))
sqrt(mean((pred_train_linear_w .- y_train) .^ 2))
# logistic / cross-entropy
params1 = EvoTreeRegressor(;
loss=:logistic,
nrounds=200,
nbins=64,
lambda=0.1,
gamma=0.1,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type
)
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
print_every_n=25,
early_stopping_rounds=50,
metric=:logloss
);
# 218.040 ms (123372 allocations: 34.71 MiB)
# @btime model = fit_evotree($params1, $X_train, $Y_train, X_eval = $X_eval, Y_eval = $Y_eval)
@time pred_train_logistic = model(x_train);
@time pred_eval_logistic = model(x_eval)
sqrt(mean((pred_train_logistic .- y_train) .^ 2))
# L1
params1 = EvoTreeRegressor(;
loss=:l1,
alpha=0.5,
nrounds=500,
nbins=64,
lambda=0.0,
gamma=0.0,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type,
)
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
print_every_n=25,
early_stopping_rounds=50,
metric=:mae
);
@time pred_train_L1 = model(x_train)
@time pred_eval_L1 = model(x_eval)
sqrt(mean((pred_train_L1 .- y_train) .^ 2))
x_perm = sortperm(x_train[:, 1])
plot(
x_train,
y_train,
msize=0.5,
mcolor="darkgray",
mswidth=0,
background_color=RGB(1, 1, 1),
seriestype=:scatter,
xaxis=("feature"),
yaxis=("target"),
legend=true,
label="",
)
plot!(
x_train[:, 1][x_perm],
pred_train_linear[x_perm],
color="navy",
linewidth=1.5,
label="Linear",
)
plot!(
x_train[:, 1][x_perm],
pred_train_linear_w[x_perm],
color="lightblue",
linewidth=1.5,
label="LinearW",
)
plot!(
x_train[:, 1][x_perm],
pred_train_logistic[x_perm],
color="darkred",
linewidth=1.5,
label="Logistic",
)
plot!(
x_train[:, 1][x_perm],
pred_train_L1[x_perm],
color="darkgreen",
linewidth=1.5,
label="L1",
)
savefig("figures/regression-sinus-$tree_type.png")
# Poisson
params1 = EvoTreeCount(;
loss=:poisson,
nrounds=500,
nbins=64,
lambda=0.1,
gamma=0.1,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type,
)
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
print_every_n=25,
early_stopping_rounds=50,
metric=:poisson
);
@time pred_train_poisson = model(x_train);
sqrt(mean((pred_train_poisson .- y_train) .^ 2))
# Gamma
params1 = EvoTreeRegressor(;
loss=:gamma,
nrounds=500,
nbins=64,
lambda=0.1,
gamma=0.1,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type,
)
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
print_every_n=25,
early_stopping_rounds=50,
metric=:gamma
);
@time pred_train_gamma = model(x_train);
sqrt(mean((pred_train_gamma .- y_train) .^ 2))
# Tweedie
params1 = EvoTreeRegressor(;
loss=:tweedie,
nrounds=500,
nbins=64,
lambda=0.1,
gamma=0.1,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type,
)
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
print_every_n=25,
early_stopping_rounds=50,
metric=:tweedie
);
@time pred_train_tweedie = model(x_train);
sqrt(mean((pred_train_tweedie .- y_train) .^ 2))
x_perm = sortperm(x_train[:, 1])
plot(
x_train,
y_train,
msize=0.5,
mcolor="darkgray",
mswidth=0,
background_color=RGB(1, 1, 1),
seriestype=:scatter,
xaxis=("feature"),
yaxis=("target"),
legend=true,
label="",
)
plot!(
x_train[:, 1][x_perm],
pred_train_poisson[x_perm],
color="navy",
linewidth=1.5,
label="Poisson",
)
plot!(
x_train[:, 1][x_perm],
pred_train_gamma[x_perm],
color="lightblue",
linewidth=1.5,
label="Gamma",
)
plot!(
x_train[:, 1][x_perm],
pred_train_tweedie[x_perm],
color="darkred",
linewidth=1.5,
label="Tweedie",
)
savefig("figures/regression-sinus2-$tree_type.png")
###############################
## Quantiles
###############################
# q50
params1 = EvoTreeRegressor(;
loss=:quantile,
alpha=0.5,
nrounds=500,
nbins=64,
lambda=0.1,
gamma=0.0,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type,
)
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
print_every_n=25,
early_stopping_rounds=50,
metric=:mae
);
# 116.822 ms (74496 allocations: 36.41 MiB) for 100 iterations
# @btime model = grow_gbtree($X_train, $Y_train, $params1, X_eval = $X_eval, Y_eval = $Y_eval)
@time pred_train_q50 = model(x_train)
sum(pred_train_q50 .< y_train) / length(y_train)
# q20
params1 = EvoTreeRegressor(;
loss=:quantile,
alpha=0.2,
nrounds=300,
nbins=64,
lambda=0.1,
gamma=0.0,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type,
)
@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25);
@time pred_train_q20 = model(x_train)
sum(pred_train_q20 .< y_train) / length(y_train)
# q80
params1 = EvoTreeRegressor(;
loss=:quantile,
alpha=0.8,
nrounds=300,
nbins=64,
lambda=0.1,
gamma=0.0,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type,
)
@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25)
@time pred_train_q80 = model(x_train)
sum(pred_train_q80 .< y_train) / length(y_train)
x_perm = sortperm(x_train[:, 1])
plot(
x_train,
y_train,
ms=0.5,
mcolor="darkgray",
mswidth=0,
background_color=RGB(1, 1, 1),
seriestype=:scatter,
xaxis=("feature"),
yaxis=("target"),
legend=true,
label="",
)
plot!(
x_train[:, 1][x_perm],
pred_train_q50[x_perm],
color="navy",
linewidth=1.5,
label="Median",
)
plot!(
x_train[:, 1][x_perm],
pred_train_q20[x_perm],
color="darkred",
linewidth=1.5,
label="Q20",
)
plot!(
x_train[:, 1][x_perm],
pred_train_q80[x_perm],
color="darkgreen",
linewidth=1.5,
label="Q80",
)
savefig("figures/quantiles-sinus-$tree_type.png")
###############################
## gaussian
###############################
params1 = EvoTreeMLE(;
T=Float64,
loss=:gaussian,
nrounds=500,
nbins=64,
lambda=0.1,
gamma=0.1,
eta=0.1,
max_depth=6,
min_weight=10,
rowsample=0.5,
colsample=1.0,
rng=123,
tree_type,
)
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
print_every_n=25,
early_stopping_rounds=50,
metric=:gaussian
);
# @time model = fit_evotree(params1, X_train, Y_train, print_every_n = 10);
@time pred_train = model(x_train);
# @btime pred_train = EvoTrees.predict(model, X_train);
pred_gauss =
[Distributions.Normal(pred_train[i, 1], pred_train[i, 2]) for i in axes(pred_train, 1)]
pred_q80 = quantile.(pred_gauss, 0.8)
pred_q20 = quantile.(pred_gauss, 0.2)
mean(y_train .< pred_q80)
mean(y_train .< pred_q20)
x_perm = sortperm(x_train[:, 1])
plot(
x_train[:, 1],
y_train,
ms=0.5,
mcolor="darkgray",
mswidth=0,
background_color=RGB(1, 1, 1),
seriestype=:scatter,
xaxis=("feature"),
yaxis=("target"),
legend=true,
label="",
)
plot!(
x_train[:, 1][x_perm],
pred_train[x_perm, 1],
color="navy",
linewidth=1.5,
label="mu",
)
plot!(
x_train[:, 1][x_perm],
pred_train[x_perm, 2],
color="darkred",
linewidth=1.5,
label="sigma",
)
plot!(
x_train[:, 1][x_perm],
pred_q20[x_perm, 1],
color="darkgreen",
linewidth=1.5,
label="q20",
)
plot!(
x_train[:, 1][x_perm],
pred_q80[x_perm, 1],
color="darkgreen",
linewidth=1.5,
label="q80",
)
savefig("figures/gaussian-sinus-$tree_type.png")
###############################
## Logistic
###############################
params1 = EvoTrees.EvoTreeMLE(;
loss=:logistic,
nrounds=500,
nbins=64,
lambda=0.1,
gamma=0.1,
eta=0.1,
max_depth=6,
min_weight=10,
rowsample=0.5,
colsample=1.0,
tree_type,
rng=123,
)
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
print_every_n=25,
early_stopping_rounds=50,
metric=:logistic_mle
);
# @time model = fit_evotree(params1, X_train, Y_train, print_every_n = 10);
@time pred_train = model(x_train);
# @btime pred_train = EvoTrees.predict(model, X_train);
pred_logistic = [
Distributions.Logistic(pred_train[i, 1], pred_train[i, 2]) for i in axes(pred_train, 1)
]
pred_q80 = quantile.(pred_logistic, 0.8)
pred_q20 = quantile.(pred_logistic, 0.2)
mean(y_train .< pred_q80)
mean(y_train .< pred_q20)
x_perm = sortperm(x_train[:, 1])
plot(
x_train[:, 1],
y_train,
ms=0.5,
mcolor="darkgray",
mswidth=0,
background_color=RGB(1, 1, 1),
seriestype=:scatter,
xaxis=("feature"),
yaxis=("target"),
legend=true,
label="",
)
plot!(
x_train[:, 1][x_perm],
pred_train[x_perm, 1],
color="navy",
linewidth=1.5,
label="mu",
)
plot!(
x_train[:, 1][x_perm],
pred_train[x_perm, 2],
color="darkred",
linewidth=1.5,
label="s",
)
plot!(
x_train[:, 1][x_perm],
pred_q20[x_perm, 1],
color="darkgreen",
linewidth=1.5,
label="q20",
)
plot!(
x_train[:, 1][x_perm],
pred_q80[x_perm, 1],
color="darkgreen",
linewidth=1.5,
label="q80",
)
savefig("figures/logistic-sinus-$tree_type.png")
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 6525 | using BenchmarkTools
using Statistics
using StatsBase: sample, quantile
using Distributions
using Random
using Plots
using Revise
using CUDA
using EvoTrees
using EvoTrees: predict, sigmoid, logit
# using ProfileView
# prepare a dataset
tree_type = "binary" # binary/oblivious
device = "gpu"
Random.seed!(123)
features = rand(10_000) .* 5
X = reshape(features, (size(features)[1], 1))
Y = sin.(features) .* 0.5 .+ 0.5
Y = logit(Y) + randn(size(Y))
Y = sigmoid(Y)
is = collect(1:size(X, 1))
# train-eval split
i_sample = sample(is, size(is, 1), replace=false)
train_size = 0.8
i_train = i_sample[1:floor(Int, train_size * size(is, 1))]
i_eval = i_sample[floor(Int, train_size * size(is, 1))+1:end]
x_train, x_eval = X[i_train, :], X[i_eval, :]
y_train, y_eval = Y[i_train], Y[i_eval]
# linear
params1 = EvoTreeRegressor(;
loss=:linear,
nrounds=500,
nbins=64,
lambda=0.1,
gamma=0.1,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type
)
# @time model = fit_evotree(params1; x_train, y_train);
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:mse,
print_every_n=25,
early_stopping_rounds=50,
device
);
# model, logger = fit_evotree(params1; x_train, y_train, metric=:mse, x_eval, y_eval, early_stopping_rounds=20, print_every_n=10, return_logger=true);
@time pred_train_linear_cpu = model(x_train)
@time pred_train_linear_gpu = model(x_train; device)
sum(pred_train_linear_gpu .- pred_train_linear_cpu)
# @btime model = grow_gbtree($X_train, $Y_train, $params1, X_eval = $X_eval, Y_eval = $Y_eval, print_every_n = 25, metric=:mae)
@time pred_train_linear = predict(model, x_train)
mean(abs.(pred_train_linear .- y_train))
sqrt(mean((pred_train_linear .- y_train) .^ 2))
# logistic / cross-entropy
params1 = EvoTreeRegressor(;
T=Float32,
loss=:logistic,
nrounds=500,
nbins=64,
lambda=0.1,
gamma=0.1,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type
)
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:logloss,
print_every_n=25,
early_stopping_rounds=50,
device
);
@time pred_train_logistic = model(x_train; device)
sqrt(mean((pred_train_logistic .- y_train) .^ 2))
# poisson
params1 = EvoTreeCount(;
T=Float32,
loss=:poisson,
nrounds=500,
nbins=64,
lambda=0.1,
gamma=0.1,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type
)
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:poisson,
print_every_n=25,
early_stopping_rounds=50,
device
);
@time pred_train_poisson = model(x_train; device)
sqrt(mean((pred_train_poisson .- y_train) .^ 2))
# gamma
params1 = EvoTreeRegressor(;
T=Float32,
loss=:gamma,
nrounds=500,
nbins=64,
lambda=0.1,
gamma=0.1,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type
)
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:gamma,
print_every_n=25,
early_stopping_rounds=50,
device
);
@time pred_train_gamma = model(x_train; device)
sqrt(mean((pred_train_gamma .- y_train) .^ 2))
# tweedie
params1 = EvoTreeRegressor(;
loss=:tweedie,
nrounds=500,
nbins=64,
lambda=0.1,
gamma=0.1,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
tree_type
)
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:tweedie,
print_every_n=25,
early_stopping_rounds=50,
device
);
@time pred_train_tweedie = model(x_train; device)
sqrt(mean((pred_train_tweedie .- y_train) .^ 2))
x_perm = sortperm(x_train[:, 1])
plot(
x_train,
y_train,
msize=0.5,
mcolor="darkgray",
mswidth=0,
background_color=RGB(1, 1, 1),
seriestype=:scatter,
xaxis=("feature"),
yaxis=("target"),
legend=true,
label="",
)
plot!(
x_train[:, 1][x_perm],
pred_train_linear[x_perm],
color="navy",
linewidth=1.5,
label="Linear",
)
plot!(
x_train[:, 1][x_perm],
pred_train_logistic[x_perm],
color="darkred",
linewidth=1.5,
label="Logistic",
)
plot!(
x_train[:, 1][x_perm],
pred_train_poisson[x_perm],
color="green",
linewidth=1.5,
label="Poisson",
)
plot!(
x_train[:, 1][x_perm],
pred_train_gamma[x_perm],
color="pink",
linewidth=1.5,
label="Gamma",
)
plot!(
x_train[:, 1][x_perm],
pred_train_tweedie[x_perm],
color="orange",
linewidth=1.5,
label="Tweedie",
)
savefig("figures/regression-sinus-$tree_type-gpu.png")
###############################
## gaussian
###############################
params1 = EvoTreeGaussian(;
nrounds=500,
nbins=64,
lambda=0.1,
gamma=0.1,
eta=0.1,
max_depth=6,
min_weight=10,
rowsample=0.5,
colsample=1.0,
rng=123,
tree_type
)
@time model = fit_evotree(params1; x_train, y_train);
@time model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
print_every_n=25,
early_stopping_rounds=50,
metric=:gaussian,
device
);
# @time model = fit_evotree(params1, X_train, Y_train, print_every_n = 10);
@time pred_train_gaussian = model(x_train; device)
pred_gauss = [
Distributions.Normal(pred_train_gaussian[i, 1], pred_train_gaussian[i, 2]) for
i in axes(pred_train_gaussian, 1)
]
pred_q80 = quantile.(pred_gauss, 0.8)
pred_q20 = quantile.(pred_gauss, 0.2)
mean(y_train .< pred_q80)
mean(y_train .< pred_q20)
x_perm = sortperm(x_train[:, 1])
plot(
x_train[:, 1],
y_train,
ms=0.5,
mcolor="darkgray",
mswidth=0,
background_color=RGB(1, 1, 1),
seriestype=:scatter,
xaxis=("feature"),
yaxis=("target"),
legend=true,
label="",
)
plot!(
x_train[:, 1][x_perm],
pred_train_gaussian[x_perm, 1],
color="navy",
linewidth=1.5,
label="mu",
)
plot!(
x_train[:, 1][x_perm],
pred_train_gaussian[x_perm, 2],
color="darkred",
linewidth=1.5,
label="sigma",
)
plot!(
x_train[:, 1][x_perm],
pred_q20[x_perm, 1],
color="green",
linewidth=1.5,
label="q20",
)
plot!(
x_train[:, 1][x_perm],
pred_q80[x_perm, 1],
color="green",
linewidth=1.5,
label="q80",
)
savefig("figures/gaussian-sinus-$tree_type-gpu.png")
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 17164 | using Revise
using Statistics
using StatsBase: sample, sample!
using EvoTrees
using BenchmarkTools
using CUDA
using Base.Threads: nthreads, threadid, @threads, @spawn
using Random: seed!, Xoshiro, MersenneTwister, TaskLocalRNG
# prepare a dataset
features = rand(Int(1.25e6), 100)
# features = rand(100, 10)
X = features
Y = rand(size(X, 1))
𝑖 = collect(1:size(X, 1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
x_train, x_eval = X[𝑖_train, :], X[𝑖_eval, :]
y_train, y_eval = Y[𝑖_train], Y[𝑖_eval]
###########################
# Tree CPU
###########################
params_c = EvoTreeRegressor(
T = Float32,
loss = :linear,
nrounds = 100,
lambda = 0.1,
gamma = 0.0,
eta = 0.1,
max_depth = 6,
min_weight = 1.0,
rowsample = 0.5,
colsample = 0.5,
nbins = 64,
);
model_c, cache_c = EvoTrees.init_evotree(params_c; x_train, y_train);
# initialize from cache
X_size = size(cache_c.x_bin)
# 897.800 μs (6 allocations: 736 bytes)
@time EvoTrees.update_grads!(cache_c.∇, cache_c.pred, cache_c.y, params_c)
# @btime EvoTrees.update_grads!($params_c.loss, $cache_c.δ𝑤, $cache_c.pred_cpu, $cache_c.Y_cpu, $params_c.α)
# select random rows and cols
cache_c.nodes[1].is = EvoTrees.subsample(cache_c.is, cache_c.mask, params_c.rowsample, cache_c.rngs);
length(cache_c.nodes[1].is)
sample!(params_c.rng, cache_c.js_, cache_c.js, replace = false, ordered = true);
# @btime sample!(params_c.rng, cache_c.𝑖_, cache_c.nodes[1].𝑖, replace=false, ordered=true);
is = cache_c.nodes[1].is
js = cache_c.js
L = EvoTrees.Linear
K = 1
T = Float32
######################################################
# sampling experiments
######################################################
# 7.892 ms (0 allocations: 0 bytes)
function get_rand!(rng, mask)
@threads for i in eachindex(mask)
@inbounds mask[i] = rand(rng, UInt8)
end
end
function get_rand_repro_A!(rngs, mask)
@threads for i in eachindex(mask)
tid = threadid()
@inbounds mask[i] = rand(rngs[tid], UInt8)
end
end
function get_rand_repro_B!(rngs, mask)
nblocks = length(rngs)
chunk_size = cld(length(mask), nblocks)
@threads for bid = 1:nblocks
i_start = chunk_size * (bid - 1) + 1
i_stop = min(length(mask), i_start + chunk_size - 1)
rng = rngs[bid]
@inbounds for i = i_start:i_stop
@inbounds mask[i] = rand(rng, UInt8)
end
end
end
function get_rand_repro_C!(rngs, mask)
nblocks = length(rngs)
chunk_size = cld(length(mask), nblocks)
@threads for bid = 1:nblocks
tx = threadid()
i_start = chunk_size * (tx - 1) + 1
i_stop = min(length(mask), i_start + chunk_size - 1)
rng = rngs[tx]
@inbounds for i = i_start:i_stop
mask[i] = rand(rng, UInt8)
end
end
end
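# Note (hedged interpretation, not in the original script): variant C keys both the
# chunk and the RNG off threadid(); with dynamic task scheduling, two loop iterations
# can run on the same thread, so chunks may be skipped or filled twice. Variant B,
# which keys everything off the block id `bid`, is deterministic by construction.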
nobs = 1_000_000
mask_ori = zeros(UInt32, nobs)
rng = TaskLocalRNG()
seed!(rng, 123)
# rng = Xoshiro(123 + 1)
# rng = MersenneTwister(123 + 1)
get_rand!(rng, mask_ori)
@btime get_rand!($rng, $mask_ori)
mask_A = zeros(UInt32, nobs)
rngs = [MersenneTwister(123 + i) for i = 1:nthreads()]
get_rand_repro_A!(rngs, mask_A)
@btime get_rand_repro_A!($rngs, $mask_A)
mask_B = zeros(UInt32, nobs)
rngs = [Xoshiro(123 + i) for i = 1:nthreads()]
get_rand_repro_B!(rngs, mask_B)
@btime get_rand_repro_B!($rngs, $mask_B)
mask_C = zeros(UInt32, nobs)
rngs = [MersenneTwister(123 + i) for i = 1:nthreads()]
get_rand_repro_C!(rngs, mask_C)
@btime get_rand_repro_C!($rngs, $mask_C)
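# Hedged sanity check (not in the original script): with freshly seeded per-block
# RNGs, variant B must reproduce the same mask on every run, since each block owns a
# fixed chunk and a fixed RNG regardless of which thread executes it.
mask_B1, mask_B2 = zeros(UInt32, nobs), zeros(UInt32, nobs)
get_rand_repro_B!([Xoshiro(123 + i) for i = 1:nthreads()], mask_B1)
get_rand_repro_B!([Xoshiro(123 + i) for i = 1:nthreads()], mask_B2)
@assert mask_B1 == mask_B2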
rngs = [MersenneTwister(123 + i) for i = 1:nthreads()]
rng1 = MersenneTwister(123)
rng2 = MersenneTwister(124)
rand(rngs[1], UInt32)
rand(rng1, UInt32)
rand(rng2, UInt32)
@btime sample!(
$params_c.rng,
    $cache_c.is_,
    $cache_c.nodes[1].is,
replace = false,
ordered = true,
);
# 21.684 ms (2 allocations: 7.63 MiB)
@btime sample!(
$params_c.rng,
    $cache_c.is_,
    $cache_c.nodes[1].is,
replace = false,
ordered = false,
);
# 27.441 ms (0 allocations: 0 bytes)
@btime sample!(
$params_c.rng,
    $cache_c.is_,
    $cache_c.nodes[1].is,
replace = true,
ordered = true,
);
src = zeros(Bool, length(cache_c.is_))
target = zeros(Bool, length(cache_c.is_))
# 58.000 μs (3 allocations: 976.69 KiB)
@btime rand(Bool, length(src));
# 1.452 ms (3 allocations: 7.63 MiB)
@btime rand(Float64, length(src));
# 507.800 μs (3 allocations: 3.81 MiB)
@btime rand(Float32, length(src));
@btime rand(Float16, length(src));
# 500.000 μs (3 allocations: 3.81 MiB)
@btime rand(UInt32, length(src));
# 244.800 μs (3 allocations: 1.91 MiB)
@btime rand(UInt16, length(src));
# 62.000 μs (3 allocations: 976.69 KiB)
@btime rand(UInt8, length(src));
function get_rand!(mask)
@threads for i in eachindex(mask)
@inbounds mask[i] = rand(UInt8)
end
end
mask = zeros(UInt8, length(cache_c.is))
# 126.100 μs (48 allocations: 5.08 KiB)
@btime get_rand!($mask)
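# Note (hedged): UInt8 is the cheapest type to draw (see the timings above), and
# comparing mask[i] <= round(UInt8, 255 * rowsample) quantizes rowsample to a 1/255
# granularity, which is plenty for row subsampling.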
function subsample_kernelA!(mask, cond, out_view)
count = 0
@inbounds for i in eachindex(out_view)
if mask[i] <= cond
count += 1
out_view[count] = i
end
end
return count
end
function subsampleA(out, mask, rowsample)
get_rand!(mask)
cond = round(UInt8, 255 * rowsample)
nblocks = ceil(Int, min(length(out) / 100_000, Threads.nthreads()))
chunk_size = ceil(Int, length(out) / nblocks)
counts = zeros(Int, nblocks)
@threads for bid in eachindex(counts)
i_start = chunk_size * (bid - 1) + 1
i_stop = (bid == nblocks) ? length(out) : i_start + chunk_size - 1
counts[bid] = subsample_kernelA!(mask, cond, view(out, i_start:i_stop))
end
count = 0
@inbounds for bid in eachindex(counts)
i_start = chunk_size * (bid - 1) + 1
view(out, count+1:(count+counts[bid])) .= view(out, i_start:(i_start+counts[bid]-1))
count += counts[bid]
end
return view(out, 1:count)
end
out = zeros(UInt32, 1_000_000)
mask = rand(UInt8, length(out))
@time out_view = subsampleA(out, mask, 0.5);
Int(minimum(out_view))
Int(maximum(out_view))
@btime subsampleA($out, $mask, 0.5);
function subsample(out::AbstractVector, mask::AbstractVector, rowsample::AbstractFloat)
get_rand!(mask)
cond = round(UInt8, 255 * rowsample)
chunk_size = cld(length(out), min(length(out) ÷ 1024, Threads.nthreads()))
nblocks = cld(length(out), chunk_size)
counts = zeros(Int, nblocks)
@threads for bid = 1:nblocks
i_start = chunk_size * (bid - 1) + 1
i_stop = bid == nblocks ? length(out) : i_start + chunk_size - 1
count = 0
for i = i_start:i_stop
if mask[i] <= cond
out[i_start+count] = i
count += 1
end
end
counts[bid] = count
end
counts_cum = cumsum(counts) .- counts
for bid = 1:nblocks
count_cum = counts_cum[bid]
i_start = chunk_size * (bid - 1)
@inbounds for i = 1:counts[bid]
out[count_cum+i] = out[i_start+i]
end
end
return view(out, 1:sum(counts))
end
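# Note (hedged): the function works in two phases - each block compacts the indices
# it keeps into the head of its own chunk and records a count, then the chunks are
# shifted left by the exclusive cumulative counts so that the surviving indices end
# up contiguous in out[1:sum(counts)].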
out = zeros(UInt32, 1_000_000)
mask = rand(UInt8, length(out))
@time out_view = subsample(out, mask, 0.5);
Int(minimum(out_view))
Int(maximum(out_view))
@btime subsample($out, $mask, 0.5);
@code_warntype subsample(out, mask, 0.5);
function debug(n)
    for i = 1:n
        out = zeros(UInt32, 1_000_000)
        mask = rand(UInt8, length(out))
        out_view = subsample(out, mask, 0.5)
        zero_count = sum(out_view .== 0)
        if zero_count > 0
            @info "$zero_count 0s at iteration $i"
        end
    end
end
debug(100)
function get_rand_kernel!(mask)
tix = threadIdx().x
bdx = blockDim().x
bix = blockIdx().x
gdx = gridDim().x
i_max = length(mask)
niter = cld(i_max, bdx * gdx)
for iter = 1:niter
i = tix + bdx * (bix - 1) + bdx * gdx * (iter - 1)
if i <= i_max
mask[i] = rand(UInt8)
end
end
sync_threads()
end
function get_rand_gpu!(mask)
threads = (1024,)
blocks = (256,)
@cuda threads = threads blocks = blocks get_rand_kernel!(mask)
CUDA.synchronize()
end
function subsample_step_1_kernel(out, mask, cond, counts, chunk_size)
bid = blockIdx().x
gdim = gridDim().x
i_start = chunk_size * (bid - 1) + 1
i_stop = bid == gdim ? length(out) : i_start + chunk_size - 1
count = 0
@inbounds for i = i_start:i_stop
@inbounds if mask[i] <= cond
out[i_start+count] = i
count += 1
end
end
sync_threads()
@inbounds counts[bid] = count
sync_threads()
end
function subsample_step_2_kernel(out, counts, counts_cum, chunk_size)
bid = blockIdx().x
count_cum = counts_cum[bid]
i_start = chunk_size * (bid - 1)
@inbounds for i = 1:counts[bid]
out[count_cum+i] = out[i_start+i]
end
sync_threads()
end
function subsample_gpu(out::CuVector, mask::CuVector, rowsample::AbstractFloat)
get_rand_gpu!(mask)
cond = round(UInt8, 255 * rowsample)
chunk_size = cld(length(out), min(length(out) ÷ 128, 2048))
nblocks = cld(length(out), chunk_size)
counts = CUDA.zeros(Int, nblocks)
blocks = (nblocks,)
threads = (1,)
@cuda blocks = nblocks threads = 1 subsample_step_1_kernel(
out,
mask,
cond,
counts,
chunk_size,
)
CUDA.synchronize()
counts_cum = cumsum(counts) - counts
@cuda blocks = nblocks threads = 1 subsample_step_2_kernel(
out,
counts,
counts_cum,
chunk_size,
)
CUDA.synchronize()
return view(out, 1:sum(counts))
end
out = CUDA.zeros(UInt32, 1_000_000)
mask = CUDA.zeros(UInt8, length(out))
CUDA.@time get_rand_gpu!(mask)
# 39.100 μs (5 allocations: 304 bytes)
# @btime get_rand_gpu!(mask)
CUDA.@time out_view = subsample_gpu(out, mask, 0.5);
Int(minimum(out_view))
Int(maximum(out_view))
@btime subsample_gpu(out, mask, 1.0);
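# Hedged sanity check (not in the original script): with rowsample = 1.0 the cutoff
# is 255, every UInt8 mask value passes, and the returned view must cover all rows.
out_view = subsample_gpu(out, mask, 1.0)
@assert length(out_view) == length(out)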
##########################################################
# end subsample tests
##########################################################
# 12.058 ms (2998 allocations: 284.89 KiB)
tree = EvoTrees.Tree{L,K,T}(params_c.max_depth)
@time EvoTrees.grow_tree!(
tree,
cache_c.nodes,
params_c,
    cache_c.∇,
    cache_c.edges,
    cache_c.js,
    cache_c.out,
    cache_c.left,
cache_c.right,
cache_c.x_bin,
cache_c.monotone_constraints,
)
@code_warntype EvoTrees.grow_tree!(
tree,
cache_c.nodes,
params_c,
    cache_c.∇,
    cache_c.edges,
    cache_c.js,
    cache_c.out,
    cache_c.left,
cache_c.right,
cache_c.x_bin,
cache_c.monotone_constraints,
)
@btime EvoTrees.grow_tree!(
$EvoTrees.Tree{L,K,T}(params_c.max_depth),
$cache_c.nodes,
$params_c,
    $cache_c.∇,
    $cache_c.edges,
    $cache_c.js,
    $cache_c.out,
    $cache_c.left,
$cache_c.right,
$cache_c.x_bin,
$cache_c.monotone_constraints,
)
# push!(model_c.trees, tree)
# 993.447 μs (75 allocations: 7.17 KiB)
@btime EvoTrees.predict!(cache_c.pred, tree, cache_c.x)
δ𝑤, edges, x_bin, nodes, out, left, right, mask, monotone_constraints = cache_c.∇,
cache_c.edges,
cache_c.x_bin,
cache_c.nodes,
cache_c.out,
cache_c.left,
cache_c.right,
cache_c.mask,
cache_c.monotone_constraints;
# Float32: 2.984 ms (73 allocations: 6.52 KiB)
# Float64: 5.020 ms (73 allocations: 6.52 KiB)
𝑖 = cache_c.nodes[1].is
𝑗 = cache_c.js
# mask = rand(Bool, length(𝑖))
@time EvoTrees.update_hist!(L, nodes[1].h, δ𝑤, x_bin, 𝑖, 𝑗)
@btime EvoTrees.update_hist!($L, $nodes[1].h, $δ𝑤, $x_bin, $𝑖, $𝑗)
@code_warntype EvoTrees.update_hist!(L, nodes[1].h, δ𝑤, x_bin, 𝑖, 𝑗)
j = 1
n = 1
nodes[1].∑ .= vec(sum(δ𝑤[:, 𝑖], dims = 2))
@time EvoTrees.update_gains!(nodes[n], 𝑗, params_c, K, monotone_constraints)
# 12.160 μs (0 allocations: 0 bytes)
@btime EvoTrees.update_gains!($nodes[n], $𝑗, $params_c, $K, $monotone_constraints)
nodes[1].gains;
# @code_warntype EvoTrees.update_gains!(nodes[n], 𝑗, params_c, K, monotone_constraints)
# 5.793 μs (1 allocation: 32 bytes)
best = findmax(nodes[n].gains)
@btime best = findmax(nodes[n].gains)
tree.cond_bin[n] = best[2][1]
tree.feat[n] = best[2][2]
Int.(tree.cond_bin[n])
# tree.cond_bin[n] = 32
# 204.900 μs (1 allocation: 96 bytes)
offset = 0
@time EvoTrees.split_set!(left, right, 𝑖, x_bin, tree.feat[n], tree.cond_bin[n], offset)
@btime EvoTrees.split_set!(
    $left,
    $right,
    $𝑖,
    $x_bin,
    $tree.feat[n],
    $tree.cond_bin[n],
    $offset,
)
@code_warntype EvoTrees.split_set!(left, right, 𝑖, x_bin, tree.feat[n], tree.cond_bin[n], offset)
# 186.294 μs (151 allocations: 15.06 KiB)
@time _left, _right = EvoTrees.split_set_threads!(
out,
left,
right,
𝑖,
x_bin,
tree.feat[n],
tree.cond_bin[n],
offset,
);
@btime EvoTrees.split_set_threads!(
$out,
$left,
$right,
$𝑖,
$x_bin,
$tree.feat[n],
$tree.cond_bin[n],
$offset,
);
###################################################
# GPU
###################################################
params_g = EvoTreeRegressor(
T = Float32,
loss = :linear,
nrounds = 100,
lambda = 1.0,
gamma = 0.1,
eta = 0.1,
max_depth = 6,
min_weight = 1.0,
rowsample = 0.5,
colsample = 0.5,
nbins = 64,
);
model_g, cache_g = EvoTrees.init_evotree_gpu(params_g; x_train, y_train);
x_size = size(cache_g.x_bin);
# select random rows and cols
𝑖c = cache_g.𝑖_[sample(
params_g.rng,
cache_g.𝑖_,
ceil(Int, params_g.rowsample * x_size[1]),
replace = false,
ordered = true,
)]
𝑖 = CuVector(𝑖c)
𝑗c = cache_g.𝑗_[sample(
params_g.rng,
cache_g.𝑗_,
ceil(Int, params_g.colsample * x_size[2]),
replace = false,
ordered = true,
)]
𝑗 = CuVector(𝑗c)
cache_g.nodes[1].𝑖 = 𝑖
cache_g.𝑗 .= 𝑗c
L = EvoTrees.Linear
K = 1
T = Float32
# build a new tree
# 144.600 μs (23 allocations: 896 bytes) - 5-6 X time faster on GPU
@time CUDA.@sync EvoTrees.update_grads_gpu!(cache_g.δ𝑤, cache_g.pred, cache_g.y, params_g)
# @btime CUDA.@sync EvoTrees.update_grads_gpu!($params_g.loss, $cache_g.δ𝑤, $cache_g.pred_gpu, $cache_g.Y_gpu)
# sum Gradients of each of the K parameters and bring to CPU
# 33.447 ms (6813 allocations: 307.27 KiB)
tree = EvoTrees.TreeGPU{L,K,T}(params_g.max_depth)
sum(cache_g.δ𝑤[:, cache_g.nodes[1].𝑖], dims = 2)
CUDA.@time EvoTrees.grow_tree_gpu!(
tree,
cache_g.nodes,
params_g,
cache_g.δ𝑤,
cache_g.edges,
CuVector(cache_g.𝑗),
cache_g.out,
cache_g.left,
cache_g.right,
cache_g.x_bin,
cache_g.monotone_constraints,
)
@btime EvoTrees.grow_tree_gpu!(
EvoTrees.TreeGPU{L,K,T}(params_g.max_depth),
cache_g.nodes,
params_g,
$cache_g.δ𝑤,
$cache_g.edges,
$𝑗,
$cache_g.out,
$cache_g.left,
$cache_g.right,
$cache_g.x_bin,
$cache_g.monotone_constraints,
);
# stale call against an earlier API revision (cache_g.δ / hist / gains no longer exist) - kept for reference only
# @code_warntype EvoTrees.grow_tree_gpu!(
#     EvoTrees.TreeGPU(params_g.max_depth, model_g.K, params_g.λ),
#     params_g,
#     cache_g.δ,
#     cache_g.hist,
#     cache_g.histL,
#     cache_g.histR,
#     cache_g.gains,
#     cache_g.edges,
#     𝑖,
#     𝑗,
#     𝑛,
#     cache_g.x_bin,
# );
push!(model_g.trees, tree);
# 2.736 ms (93 allocations: 13.98 KiB)
@time CUDA.@sync EvoTrees.predict!(cache_g.pred, tree, cache_g.x_bin)
@btime CUDA.@sync EvoTrees.predict!($cache_g.pred, $tree, $cache_g.x_bin)
###########################
# Tree GPU
###########################
δ𝑤, edges, x_bin, nodes, out, left, right = cache_g.δ𝑤,
cache_g.edges,
cache_g.x_bin,
cache_g.nodes,
cache_g.out,
cache_g.left,
cache_g.right;
# 2.571 ms (1408 allocations: 22.11 KiB)
# 𝑗2 = CuArray(sample(UInt32.(1:100), 50, replace=false, ordered=true))
@time EvoTrees.update_hist_gpu!(nodes[1].h, δ𝑤, x_bin, 𝑖, 𝑗)
@btime EvoTrees.update_hist_gpu!($nodes[1].h, $δ𝑤, $x_bin, $𝑖, $𝑗)
# @code_warntype EvoTrees.update_hist_gpu!(hist, δ, X_bin, 𝑖, 𝑗, 𝑛)
# 72.100 μs (186 allocations: 6.00 KiB)
n = 1
nodes[1].∑ .= vec(sum(δ𝑤[:, 𝑖], dims = 2))
CUDA.@time EvoTrees.update_gains_gpu!(params_g.loss, nodes[n], 𝑗, params_g, K)
@btime EvoTrees.update_gains_gpu!($params_g.loss, $nodes[n], $𝑗, $params_g, $K)
tree = EvoTrees.TreeGPU(params_g.max_depth, model_g.K, params_g.λ)
best = findmax(nodes[n].gains)
if best[2][1] != params_g.nbins && best[1] > nodes[n].gain + params_g.γ
tree.gain[n] = best[1]
tree.cond_bin[n] = best[2][1]
tree.feat[n] = best[2][2]
tree.cond_float[n] = edges[tree.feat[n]][tree.cond_bin[n]]
end
tree.split[n] = tree.cond_bin[n] != 0
tree.feat[n]
Int(tree.cond_bin[n])
# 673.900 μs (600 allocations: 29.39 KiB)
offset = 0
_left, _right = EvoTrees.split_set_threads_gpu!(
    out,
    left,
    right,
    𝑖,
    x_bin,
    tree.feat[n],
    tree.cond_bin[n],
    offset,
)
@time EvoTrees.split_set_threads_gpu!(
    out,
    left,
    right,
    𝑖,
    x_bin,
    tree.feat[n],
    tree.cond_bin[n],
    offset,
)
@btime EvoTrees.split_set_threads_gpu!(
    $out,
    $left,
    $right,
    $𝑖,
    $x_bin,
    $tree.feat[n],
    $tree.cond_bin[n],
    $offset,
)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 5468 | using Statistics
using StatsBase:sample, sample!
using EvoTrees
using BenchmarkTools
using CUDA
# allocations on transfer
function gpu_to_cpu(dst, src)
copy!(dst, src)
end
function cpu_to_gpu(dst, src)
copy!(dst, src)
end
x1c = zeros(1000, 1000)
x1g = CUDA.rand(1000, 1000)
@time gpu_to_cpu(x1c, x1g);
CUDA.@time gpu_to_cpu(x1c, x1g);
x1c = rand(1000, 1000)
x1g = CUDA.zeros(1000, 1000)
@time cpu_to_gpu(x1g, x1c);
CUDA.@time cpu_to_gpu(x1g, x1c);
function reshape_gpu(x)
reshape(x, 4, size(x, 1) ÷ 4, size(x,2))
end
x2 = CUDA.@time reshape_gpu(x1g);
reshape(x1g, 4, 250, 1000)
# prepare a dataset
features = rand(Int(1.25e6), 100)
# features = rand(100, 10)
X = features
Y = rand(size(X, 1))
𝑖 = collect(1:size(X, 1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1)) + 1:end]
X_train, X_eval = X[𝑖_train, :], X[𝑖_eval, :]
Y_train, Y_eval = Y[𝑖_train], Y[𝑖_eval]
###################################################
# GPU
###################################################
params_g = EvoTreeRegressor(T=Float32,
loss=:linear, metric=:none,
nrounds=100,
λ=1.0, γ=0.1, η=0.1,
max_depth=2, min_weight=1.0,
rowsample=0.5, colsample=0.5, nbins=64);
model_g, cache_g = EvoTrees.init_evotree_gpu(params_g, X_train, Y_train);
params_g = model_g.params;
X_size = size(cache_g.X_bin);
# select random rows and cols
𝑖c = cache_g.𝑖_[sample(params_g.rng, cache_g.𝑖_, ceil(Int, params_g.rowsample * X_size[1]), replace=false, ordered=true)]
𝑖 = CuVector(𝑖c)
𝑗c = cache_g.𝑗_[sample(params_g.rng, cache_g.𝑗_, ceil(Int, params_g.colsample * X_size[2]), replace=false, ordered=true)]
𝑗 = CuVector(𝑗c)
cache_g.nodes[1].𝑖 = 𝑖
cache_g.𝑗 .= 𝑗c
# build a new tree
# 144.600 μs (23 allocations: 896 bytes) - 5-6 X time faster on GPU
@time CUDA.@sync EvoTrees.update_grads_gpu!(params_g.loss, cache_g.δ𝑤, cache_g.pred_gpu, cache_g.Y_gpu)
# @btime CUDA.@sync EvoTrees.update_grads_gpu!($params_g.loss, $cache_g.δ𝑤, $cache_g.pred_gpu, $cache_g.Y_gpu)
# sum Gradients of each of the K parameters and bring to CPU
# 33.447 ms (6813 allocations: 307.27 KiB)
tree = EvoTrees.TreeGPU(params_g.max_depth, model_g.K, params_g.λ)
CUDA.@time EvoTrees.grow_tree_gpu!(tree, cache_g.nodes, params_g, cache_g.δ𝑤, cache_g.edges, cache_g.𝑗, cache_g.out, cache_g.left, cache_g.right, cache_g.X_bin, cache_g.K)
CUDA.@time EvoTrees.grow_tree_gpu!(tree, cache_g.nodes, params_g, cache_g.δ𝑤, cache_g.edges, cache_g.𝑗, cache_g.out, cache_g.left, cache_g.right, cache_g.X_bin, cache_g.K)
# CUDA.@time EvoTrees.grow_tree_gpu!(tree, params_g, cache_g.δ, cache_g.hist, cache_g.histL, cache_g.histR, cache_g.gains, cache_g.edges, 𝑖, 𝑗, 𝑛, cache_g.X_bin);
# @btime EvoTrees.grow_tree_gpu!(EvoTrees.TreeGPU(UInt32($params_g.max_depth), $model_g.K, $params_g.λ), $params_g, $cache_g.δ, $cache_g.hist, $cache_g.histL, $cache_g.histR, $cache_g.gains, $cache_g.edges, $𝑖, $𝑗, $𝑛, $cache_g.X_bin);
# @code_warntype EvoTrees.grow_tree_gpu!(EvoTrees.TreeGPU(params_g.max_depth, model_g.K, params_g.λ), params_g, cache_g.δ, cache_g.hist, cache_g.histL, cache_g.histR, cache_g.gains, cache_g.edges, 𝑖, 𝑗, 𝑛, cache_g.X_bin);
# push!(model_g.trees, tree);
# # 2.736 ms (93 allocations: 13.98 KiB)
# @time CUDA.@sync EvoTrees.predict_gpu!(cache_g.pred_gpu, tree, cache_g.X_bin)
# @btime CUDA.@sync EvoTrees.predict_gpu!($cache_g.pred_gpu, $tree, $cache_g.X_bin)
###########################
# Tree GPU
###########################
δ𝑤, K, edges, X_bin, nodes, out, left, right = cache_g.δ𝑤, cache_g.K, cache_g.edges, cache_g.X_bin, cache_g.nodes, cache_g.out, cache_g.left, cache_g.right;
# 9.613 ms (81 allocations: 13.55 KiB)
# 𝑗2 = CuArray(sample(UInt32.(1:100), 50, replace=false, ordered=true))
# @time EvoTrees.update_hist_gpu!(params_g.loss, nodes[1].h, δ𝑤, X_bin, 𝑖, 𝑗, K)
# println(nodes[1].h)
CUDA.@time EvoTrees.update_hist_gpu!(params_g.loss, nodes[1].h, δ𝑤, X_bin, 𝑖, 𝑗, K)
CUDA.@time EvoTrees.update_hist_gpu!(params_g.loss, nodes[1].h, δ𝑤, X_bin, 𝑖, 𝑗, K)
# @btime EvoTrees.update_hist_gpu!($params_g.loss, $nodes[1].h, $δ𝑤, $X_bin, $𝑖, $𝑗, $K)
# @btime EvoTrees.update_hist_gpu!($nodes[1].h, $δ𝑤, $X_bin, $nodes[1].𝑖, $𝑗)
# @code_warntype EvoTrees.update_hist_gpu!(hist, δ, X_bin, 𝑖, 𝑗, 𝑛)
# depth=1
# nid = 2^(depth - 1):2^(depth) - 1
# # 97.000 μs (159 allocations: 13.09 KiB)
# @time CUDA.@sync EvoTrees.update_gains_gpu!(gains::AbstractArray{T,3}, hist::AbstractArray{T,4}, histL::AbstractArray{T,4}, histR::AbstractArray{T,4}, 𝑗::AbstractVector{S}, params_g, nid, depth);
# @btime CUDA.@sync EvoTrees.update_gains_gpu!(gains::AbstractArray{T,3}, hist::AbstractArray{T,4}, histL::AbstractArray{T,4}, histR::AbstractArray{T,4}, 𝑗::AbstractVector{S}, params_g, nid, depth);
# gains[:,:,1]
# tree = EvoTrees.TreeGPU(UInt32(params_g.max_depth), model_g.K, params_g.λ)
# n = 1
# best = findmax(view(gains, :,:,n))
# if best[2][1] != params_g.nbins && best[1] > -Inf
# tree.gain[n] = best[1]
# tree.feat[n] = best[2][2]
# tree.cond_bin[n] = best[2][1]
# tree.cond_float[n] = edges[tree.feat[n]][tree.cond_bin[n]]
# end
# tree.split[n] = tree.cond_bin[n] != 0
# # 673.900 μs (600 allocations: 29.39 KiB)
# @time CUDA.@sync EvoTrees.update_set_gpu!(𝑛, 𝑖, X_bin, tree.feat, tree.cond_bin, params_g.nbins)
# @btime CUDA.@sync EvoTrees.update_set_gpu!($𝑛, $𝑖, $X_bin, $tree.feat, $tree.cond_bin, $params_g.nbins)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 1112 | using Revise
using EvoTrees
using MLUtils
using CSV
using DataFrames
using Arrow
using CUDA
using Base.Iterators: partition
using Base.Threads: nthreads, @threads
using Tables
########################################
# create regular dataframe
########################################
nobs = Int(1e6)
nfeats = Int(100)
x_train = rand(nobs, nfeats)
y_train = rand(nobs)
df = DataFrame(x_train, :auto)
df[!, :y] = y_train
path = joinpath(@__DIR__, "..", "data", "arrow-df.arrow")
Arrow.write(path, df)
########################################
# read streaming data
########################################
path = joinpath(@__DIR__, "..", "data", "arrow-df.arrow")
@time dtrain = Arrow.Table(path);
@time dtrain = DataFrame(Arrow.Table(path));
@time dtrain = DataFrame(Arrow.Table(path), copycols=false);
function load_1()
df = DataFrame(Arrow.Table(path), copycols=true)
select!(df, [:x1, :x2, :x3, :x4, :x5])
return df
end
function load_2()
df = DataFrame(Arrow.Table(path), copycols=false)
select!(df, [:x1, :x2, :x3, :x4, :x5])
return df
end
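# Note (hedged): with copycols=false the DataFrame wraps the memory-mapped Arrow
# columns directly, so load_2 avoids materializing all 100 columns before select!;
# the trade-off is that the resulting columns are read-only Arrow vectors.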
@time df = load_1();
@time df = load_2();
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 1029 | using Revise
using CUDA
# using StaticArrays
using StatsBase: sample
using BenchmarkTools
using Base.Threads: @threads
function hist_cpu!(
hist::Vector,
∇::Matrix,
x_bin::Matrix,
is::AbstractVector,
js::AbstractVector,
)
@threads for j in js
@inbounds @simd for i in is
bin = x_bin[i, j]
hist[j][1, bin] += ∇[1, i]
hist[j][2, bin] += ∇[2, i]
hist[j][3, bin] += ∇[3, i]
end
end
return nothing
end
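# Note (hedged): keeping one small 3 x nbins matrix per feature means each thread of
# the @threads loop owns the histogram of the feature it is processing, so no atomic
# operations are needed and writes from different threads land in separate arrays.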
nbins = 32
nobs = Int(1e6)
nfeats = 100
rowsample = 0.5
colsample = 0.5
x_bin = UInt8.(rand(1:nbins, nobs, nfeats));
∇ = rand(Float32, 3, nobs);
h∇ = [zeros(Float64, 3, nbins) for n in 1:nfeats]
is = sample(1:nobs, Int(round(rowsample * nobs)), replace=false, ordered=true)
js = sample(1:nfeats, Int(round(colsample * nfeats)), replace=false, ordered=true)
# laptop: 6.886 ms (97 allocations: 10.67 KiB)
# desktop: 3.451 ms (61 allocations: 6.52 KiB)
@time hist_cpu!(h∇, ∇, x_bin, is, js)
@btime hist_cpu!($h∇, $∇, $x_bin, $is, $js)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2815 | using Revise
using CUDA
# using StaticArrays
using StatsBase: sample
using BenchmarkTools
################################################
# TODO: test compact aggregation into log2 bins
# iterate 5 times to cover 32 bins (8 iterations for 256 bins)
# - each block builds the histogram for many features -> (k, j)
################################################
# base kernel
function kernel_share_1!(h::CuDeviceArray{T,3}, ∇::CuDeviceMatrix{T}, x_bin::CuDeviceMatrix{S}) where {T,S}
    nbins = size(h, 2)
    it, jt = threadIdx().x, threadIdx().y
    id, jd = blockDim().x, blockDim().y
    ib, jb = blockIdx().x, blockIdx().y
    ig, jg = gridDim().x, gridDim().y
    j = jt + (jb - 1) * jd
    shared = @cuDynamicSharedMem(T, 3 * nbins)
    fill!(shared, 0)
    sync_threads()
    i_tot = size(x_bin, 1)
    iter = 0
    while iter * id * ig < i_tot
        i = it + id * (ib - 1) + iter * id * ig
        if i <= size(x_bin, 1) && j <= size(x_bin, 2)
            # the shared buffer is valid because each block covers a single feature column j
            k = 3 * (x_bin[i, j] - 1)
            @inbounds CUDA.atomic_add!(pointer(shared, k + 1), ∇[i, 1])
            @inbounds CUDA.atomic_add!(pointer(shared, k + 2), ∇[i, 2])
            @inbounds CUDA.atomic_add!(pointer(shared, k + 3), ∇[i, 3])
        end
        iter += 1
    end
    sync_threads()
    # loop to cover cases where nbins > nthreads
    for iter in 1:(nbins - 1) ÷ id + 1
        bin_id = it + id * (iter - 1)
        if bin_id <= nbins
            @inbounds k = Base._to_linear_index(h, 1, bin_id, j)
            @inbounds CUDA.atomic_add!(pointer(h, k), shared[3 * (bin_id - 1) + 1])
            @inbounds CUDA.atomic_add!(pointer(h, k + 1), shared[3 * (bin_id - 1) + 2])
            @inbounds CUDA.atomic_add!(pointer(h, k + 2), shared[3 * (bin_id - 1) + 3])
        end
    end
    return nothing
end
# base approach - blocks are built along the cols first, then the rows (limits collisions)
function hist_share_1!(h::CuArray{T,3}, ∇::CuMatrix{T}, x_bin::CuMatrix{S}; MAX_THREADS=256) where {T,S}
    thread_i = min(MAX_THREADS, size(x_bin, 1))
    thread_j = 1
    threads = (thread_i, thread_j)
    blocks = (16, size(x_bin, 2))
    fill!(h, 0)
    @cuda blocks = blocks threads = threads shmem = sizeof(T) * size(h, 2) * 3 kernel_share_1!(h, ∇, x_bin)
    CUDA.synchronize()
    return
end
nbins = 64
nfeats = 100
nobs = Int(1e6)
hist = zeros(Float32, 3, nbins, nfeats)
∇ = rand(Float32, nobs, 3)
x_bin = UInt8.(rand(1:nbins, nobs, nfeats))
hist_gpu = CuArray(hist)
∇_gpu = CuArray(∇)
x_bin_gpu = CuArray(x_bin)
CUDA.@time hist_share_1!(hist_gpu, ∇_gpu, x_bin_gpu, MAX_THREADS=128)
@btime CUDA.@sync hist_share_1!($hist_gpu, $∇_gpu, $x_bin_gpu, MAX_THREADS=128)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 3220 | using Revise
using CUDA
using StatsBase: sample
using BenchmarkTools
# base kernel
function kernel_s4!(h::CuDeviceArray{T,3}, ∇::CuDeviceMatrix{T}, x_bin::CuDeviceMatrix{S}) where {T,S}
nbins = size(h, 2)
it, jt, kt = threadIdx().x, threadIdx().y, threadIdx().z
ib, jb = blockIdx().x, blockIdx().y
id, jd = blockDim().x, blockDim().y
ig, jg = gridDim().x, gridDim().y
j = jt + (jb - 1) * jd
shared = @cuDynamicSharedMem(T, 3 * nbins)
fill!(shared, 0)
sync_threads()
i_tot = size(x_bin, 1)
iter = 0
while iter * id * ig < i_tot
i = it + id * (ib - 1) + iter * id * ig
if i <= size(x_bin, 1) && j <= size(x_bin, 2)
# depends on shared to be assigned to a single feature
k = 3 * (x_bin[i, j] - 1)
@inbounds CUDA.atomic_add!(pointer(shared, k + kt), ∇[i, kt])
# @inbounds CUDA.atomic_add!(pointer(shared, k + 1), ∇[i, 1])
# @inbounds CUDA.atomic_add!(pointer(shared, k + 2), ∇[i, 2])
# @inbounds CUDA.atomic_add!(pointer(shared, k + 3), ∇[i, 3])
end
iter += 1
end
sync_threads()
# loop to cover cases where nbins > nthreads
for iter in 1:(nbins-1)÷id+1
bin_id = it + id * (iter - 1)
if bin_id <= nbins
@inbounds k = Base._to_linear_index(h, 1, bin_id, j) - 1
@inbounds CUDA.atomic_add!(pointer(h, k + kt), shared[3*(bin_id-1)+kt])
# @inbounds CUDA.atomic_add!(pointer(h, k), shared[3*(bin_id-1)+1])
# @inbounds CUDA.atomic_add!(pointer(h, k + 1), shared[3*(bin_id-1)+2])
# @inbounds CUDA.atomic_add!(pointer(h, k + 2), shared[3*(bin_id-1)+3])
end
end
sync_threads()
return nothing
end
# base approach - blocks are built along the cols first, then the rows (limits collisions)
function hist_gpu_s4!(h::AbstractArray{T,3}, ∇::AbstractMatrix{T}, x_bin::AbstractMatrix{S}; MAX_THREADS=256) where {T,S}
thread_i = min(MAX_THREADS, size(x_bin, 1))
thread_j = 1
thread_k = 3
threads = (thread_i, thread_j, thread_k)
blocks = ceil.(Int, (16, size(x_bin, 2)))
fill!(h, 0)
@cuda blocks = blocks threads = threads shmem = sizeof(T) * size(h, 2) * 3 kernel_s4!(h, ∇, x_bin)
CUDA.synchronize()
return
end
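# Note (hedged): compared with kernel_share_1!, the gradient component index is mapped
# to threadIdx().z (thread_k = 3), so each thread issues a single atomic_add instead
# of three, at the cost of a 3x larger thread block.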
nbins = 64
nfeats = 100
nobs = Int(1e6)
h = [zeros(Float32, 3, nbins) for feat in 1:nfeats];
x_bin = UInt8.(rand(1:nbins, nobs, nfeats));
∇_cpu = rand(Float32, nobs, 3);  # (nobs, 3) layout: the kernel indexes ∇[i, kt]
h∇_cpu = zeros(Float32, 3, nbins, nfeats)
rowsample = 0.5
colsample = 0.5
is = sample(1:nobs, Int(round(rowsample * nobs)), replace=false, ordered=true)
js = sample(1:nfeats, Int(round(colsample * nfeats)), replace=false, ordered=true)
∇_gpu = CuArray(∇_cpu)
x_bin_gpu = CuArray(x_bin)
h∇_gpu = CuArray(h∇_cpu)
is_gpu = CuArray(is)
js_gpu = CuArray(js)
@time hist_gpu_s4!(h∇_gpu, ∇_gpu, x_bin_gpu)
CUDA.@time hist_gpu_s4!(h∇_gpu, ∇_gpu, x_bin_gpu)
# desktop | 1K: 41.102 μs (24 allocations: 1.66 KiB)
# desktop | 10K: 59.142 μs (109 allocations: 9.09 KiB)
# desktop | 100K: 251.850 μs (109 allocations: 9.09 KiB)
# desktop | 1M: 2.203 ms (23 allocations: 1.33 KiB)
# desktop | 10M: 25.557 ms (110 allocations: 9.11 KiB)
@btime hist_gpu_s4!(h∇_gpu, ∇_gpu, x_bin_gpu)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 1626 | using Revise
using CUDA
using StaticArrays
using StatsBase: sample
using BenchmarkTools
nbins = 10
items = Int32(1e6)
hist = zeros(Float32, nbins)
x = ones(Float32, items)
idx = Int64.(rand(1:nbins, items))
hist_gpu = CuArray(hist)
x_gpu = CuArray(x)
idx_gpu = CuArray(idx)
hist .- Array(hist_gpu)
sum(hist) - sum(Array(hist_gpu))
# base kernel
function kernel!(h::CuDeviceVector{T}, x::CuDeviceVector{T}, xid::CuDeviceVector{S}) where {T,S}
nbins = size(h, 1)
it = threadIdx().x
ib = blockIdx().x
id = blockDim().x
shared = @cuDynamicSharedMem(T, nbins)
fill!(shared, 0)
fill!(h, 0)
sync_threads()
i_tot = size(x, 1)
iter = 0
while iter * id < i_tot
i = it + id * iter
if i <= size(xid, 1)
@inbounds k = Base._to_linear_index(h, xid[i])
@inbounds CUDA.atomic_add!(pointer(shared, k), x[i])
end
iter += 1
end
sync_threads()
# loop to cover cases where nbins > nthreads
for i in 1:(nbins - 1) ÷ id + 1
bin_id = it + id * (i - 1)
if bin_id <= nbins
@inbounds CUDA.atomic_add!(pointer(h, bin_id), shared[bin_id])
end
end
return nothing
end
# base approach - block built along the cols first, the rows (limit collisions)
function hist!(h::AbstractVector{T}, x::AbstractVector{T}, xid::AbstractVector{S}; MAX_THREADS=256) where {T,S}
threads = min(MAX_THREADS, size(xid, 1))
@cuda blocks = 1 threads = threads shmem = sizeof(T) * size(h,1) kernel!(h, x, xid)
return
end
@btime CUDA.@sync hist!($hist_gpu, $x_gpu, $idx_gpu, MAX_THREADS=1024)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2744 | using Revise
using CUDA
using StatsBase: sample
using BenchmarkTools
"""
hist_kernel!
"""
function hist_kernel!(h∇::CuDeviceArray{T,3}, ∇::CuDeviceMatrix{S}, x_bin, is, js) where {T,S}
tix, tiy, k = threadIdx().z, threadIdx().y, threadIdx().x
bdx, bdy = blockDim().z, blockDim().y
bix, biy = blockIdx().z, blockIdx().y
gdx = gridDim().z
j = tiy + bdy * (biy - 1)
if j <= length(js)
jdx = js[j]
i_max = length(is)
niter = cld(i_max, bdx * gdx)
@inbounds for iter = 1:niter
i = tix + bdx * (bix - 1) + bdx * gdx * (iter - 1)
if i <= i_max
@inbounds idx = is[i]
@inbounds bin = x_bin[idx, jdx]
hid = Base._to_linear_index(h∇, k, bin, jdx)
CUDA.atomic_add!(pointer(h∇, hid), T(∇[k, idx]))
end
end
end
sync_threads()
return nothing
end
function update_hist_gpu!(h, h∇_cpu, h∇, ∇, x_bin, is, js, jsc)
kernel = @cuda launch = false hist_kernel!(h∇, ∇, x_bin, is, js)
config = launch_configuration(kernel.fun)
max_threads = config.threads
max_blocks = config.blocks
k = size(h∇, 1)
ty = max(1, min(length(js), fld(max_threads, k)))
tx = max(1, min(length(is), fld(max_threads, k * ty)))
threads = (k, ty, tx)
by = cld(length(js), ty)
bx = min(cld(max_blocks, by), cld(length(is), tx))
blocks = (1, by, bx)
h∇ .= 0
kernel(h∇, ∇, x_bin, is, js; threads, blocks)
CUDA.synchronize()
copyto!(h∇_cpu, h∇)
Threads.@threads for j in jsc
nbins = size(h[j], 2)
@views h[j] .= h∇_cpu[:, 1:nbins, j]
end
return nothing
end
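# Note (hedged): the launch shape (k, ty, tx) maps the K gradient components to
# consecutive x-threads, features to the y dimension, and strided chunks of
# observations to the z dimension of threads and blocks; after the kernel, the dense
# h∇ buffer is copied back once and scattered into the per-feature CPU histograms.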
nbins = 64
nfeats = 100
nobs = Int(1e3)
h = [zeros(Float32, 3, nbins) for feat in 1:nfeats];
x_bin = UInt8.(rand(1:nbins, nobs, nfeats));
∇_cpu = rand(Float32, 3, nobs);
h∇_cpu = rand(Float32, 3, nbins, nfeats)
rowsample = 0.5
colsample = 0.5
is = sample(1:nobs, Int(round(rowsample * nobs)), replace=false, ordered=true)
js = sample(1:nfeats, Int(round(colsample * nfeats)), replace=false, ordered=true)
∇_gpu = CuArray(∇_cpu)
x_bin_gpu = CuArray(x_bin)
h∇_gpu = CuArray(h∇_cpu)
is_gpu = CuArray(is)
js_gpu = CuArray(js)
@time update_hist_gpu!(h, h∇_cpu, h∇_gpu, ∇_gpu, x_bin_gpu, is_gpu, js_gpu, js)
CUDA.@time update_hist_gpu!(h, h∇_cpu, h∇_gpu, ∇_gpu, x_bin_gpu, is_gpu, js_gpu, js)
# desktop | 1K: 46.332 μs (109 allocations: 9.09 KiB)
# desktop | 10K: 59.142 μs (109 allocations: 9.09 KiB)
# desktop | 100K: 251.850 μs (109 allocations: 9.09 KiB)
# desktop | 1M: 2.328 ms (110 allocations: 9.11 KiB)
# desktop | 10M: 25.557 ms (110 allocations: 9.11 KiB)
@btime update_hist_gpu!(h, h∇_cpu, h∇_gpu, ∇_gpu, x_bin_gpu, is_gpu, js_gpu, js)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 501 | module EvoTreesCUDAExt
using EvoTrees
using CUDA
# This should be different on CPUs and GPUs
EvoTrees.device_ones(::Type{<:EvoTrees.GPU}, ::Type{T}, n::Int) where {T} = CUDA.ones(T, n)
EvoTrees.device_array_type(::Type{<:EvoTrees.GPU}) = CuArray
function EvoTrees.post_fit_gc(::Type{<:EvoTrees.GPU})
GC.gc(true)
CUDA.reclaim()
end
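# Note (hedged): a full GC pass drops Julia-side references to device buffers and
# CUDA.reclaim() then returns the pooled device memory to the driver, so GPU memory
# held by the fitting caches is actually released after training.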
include("loss.jl")
include("eval.jl")
include("predict.jl")
include("init.jl")
include("subsample.jl")
include("fit-utils.jl")
include("fit.jl")
end # module
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 6659 | ########################
# MSE
########################
function eval_mse_kernel!(eval::CuDeviceVector{T}, p::CuDeviceMatrix{T}, y::CuDeviceVector{T}, w::CuDeviceVector{T}) where {T<:AbstractFloat}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
if i <= length(y)
@inbounds eval[i] = w[i] * (p[1, i] - y[i])^2
end
return nothing
end
function EvoTrees.mse(p::CuMatrix{T}, y::CuVector{T}, w::CuVector{T}, eval::CuVector{T}; MAX_THREADS=1024, kwargs...) where {T<:AbstractFloat}
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads eval_mse_kernel!(eval, p, y, w)
CUDA.synchronize()
return sum(eval) / sum(w)
end
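# Minimal usage sketch (hedged; the buffers below are illustrative, not library API):
# p = CUDA.rand(Float32, 1, 1024); y = CUDA.rand(Float32, 1024)
# w = CUDA.ones(Float32, 1024); eval = CUDA.zeros(Float32, 1024)
# EvoTrees.mse(p, y, w, eval) # weighted mean of per-observation squared errors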
########################
# RMSE
########################
EvoTrees.rmse(p::CuMatrix{T}, y::CuVector{T}, w::CuVector{T}, eval::CuVector{T}; MAX_THREADS=1024, kwargs...) where {T<:AbstractFloat} =
    sqrt(EvoTrees.mse(p, y, w, eval; MAX_THREADS, kwargs...))
########################
# MAE
########################
function eval_mae_kernel!(eval::CuDeviceVector{T}, p::CuDeviceMatrix{T}, y::CuDeviceVector{T}, w::CuDeviceVector{T}) where {T<:AbstractFloat}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
if i <= length(y)
@inbounds eval[i] = w[i] * abs(p[1, i] - y[i])
end
return nothing
end
function EvoTrees.mae(p::CuMatrix{T}, y::CuVector{T}, w::CuVector{T}, eval::CuVector{T}; MAX_THREADS=1024, kwargs...) where {T<:AbstractFloat}
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads eval_mae_kernel!(eval, p, y, w)
CUDA.synchronize()
return sum(eval) / sum(w)
end
########################
# Logloss
########################
function eval_logloss_kernel!(eval::CuDeviceVector{T}, p::CuDeviceMatrix{T}, y::CuDeviceVector{T}, w::CuDeviceVector{T}) where {T<:AbstractFloat}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
if i <= length(y)
@inbounds pred = EvoTrees.sigmoid(p[1, i])
@inbounds eval[i] = w[i] * (-y[i] * log(pred) + (y[i] - 1) * log(1 - pred))
end
return nothing
end
function EvoTrees.logloss(p::CuMatrix{T}, y::CuVector{T}, w::CuVector{T}, eval::CuVector{T}; MAX_THREADS=1024, kwargs...) where {T<:AbstractFloat}
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads eval_logloss_kernel!(eval, p, y, w)
CUDA.synchronize()
return sum(eval) / sum(w)
end
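# Hedged usage sketch (illustration only): all eval functions in this file share
# the same calling pattern - per-observation losses are written into `eval`, then
# reduced to a weighted mean. Sizes below are arbitrary.
#   n = 10_000
#   p = CUDA.rand(Float32, 1, n)     # raw scores (sigmoid is applied in-kernel)
#   y = CUDA.rand(Float32, n)        # targets in [0, 1]
#   w = CUDA.ones(Float32, n)        # observation weights
#   eval = CUDA.zeros(Float32, n)    # per-observation loss buffer
#   EvoTrees.logloss(p, y, w, eval)  # returns sum(eval) / sum(w)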
########################
# Gaussian
########################
function eval_gaussian_kernel!(eval::CuDeviceVector{T}, p::CuDeviceMatrix{T}, y::CuDeviceVector{T}, w::CuDeviceVector{T}) where {T<:AbstractFloat}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
if i <= length(y)
@inbounds eval[i] = -w[i] * (p[2, i] + (y[i] - p[1, i])^2 / (2 * exp(2 * p[2, i])))
end
return nothing
end
function EvoTrees.gaussian_mle(p::CuMatrix{T}, y::CuVector{T}, w::CuVector{T}, eval::CuVector{T}; MAX_THREADS=1024, kwargs...) where {T<:AbstractFloat}
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads eval_gaussian_kernel!(eval, p, y, w)
CUDA.synchronize()
return sum(eval) / sum(w)
end
########################
# Poisson Deviance
########################
function eval_poisson_kernel!(eval::CuDeviceVector{T}, p::CuDeviceMatrix{T}, y::CuDeviceVector{T}, w::CuDeviceVector{T}) where {T<:AbstractFloat}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
ϵ = eps(T(1e-7))
if i <= length(y)
@inbounds pred = exp(p[1, i])
@inbounds eval[i] = w[i] * 2 * (y[i] * log(y[i] / pred + ϵ) + pred - y[i])
end
return nothing
end
function EvoTrees.poisson(p::CuMatrix{T}, y::CuVector{T}, w::CuVector{T}, eval::CuVector{T}; MAX_THREADS=1024, kwargs...) where {T<:AbstractFloat}
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads eval_poisson_kernel!(eval, p, y, w)
CUDA.synchronize()
return sum(eval) / sum(w)
end
########################
# Gamma Deviance
########################
function eval_gamma_kernel!(eval::CuDeviceVector{T}, p::CuDeviceMatrix{T}, y::CuDeviceVector{T}, w::CuDeviceVector{T}) where {T<:AbstractFloat}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
if i <= length(y)
@inbounds pred = exp(p[1, i])
@inbounds eval[i] = w[i] * 2 * (log(pred / y[i]) + y[i] / pred - 1)
end
return nothing
end
function EvoTrees.gamma(p::CuMatrix{T}, y::CuVector{T}, w::CuVector{T}, eval::CuVector{T}; MAX_THREADS=1024, kwargs...) where {T<:AbstractFloat}
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads eval_gamma_kernel!(eval, p, y, w)
CUDA.synchronize()
return sum(eval) / sum(w)
end
########################
# Tweedie Deviance
########################
function eval_tweedie_kernel!(eval::CuDeviceVector{T}, p::CuDeviceMatrix{T}, y::CuDeviceVector{T}, w::CuDeviceVector{T}) where {T<:AbstractFloat}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
rho = T(1.5)
if i <= length(y)
pred = exp(p[1, i])
@inbounds eval[i] = w[i] * 2 * (y[i]^(2 - rho) / (1 - rho) / (2 - rho) - y[i] * pred^(1 - rho) / (1 - rho) + pred^(2 - rho) / (2 - rho))
end
return nothing
end
function EvoTrees.tweedie(p::CuMatrix{T}, y::CuVector{T}, w::CuVector{T}, eval::CuVector{T}; MAX_THREADS=1024, kwargs...) where {T<:AbstractFloat}
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads eval_tweedie_kernel!(eval, p, y, w)
CUDA.synchronize()
return sum(eval) / sum(w)
end
########################
# mlogloss
########################
function eval_mlogloss_kernel!(eval::CuDeviceVector{T}, p::CuDeviceMatrix{T}, y::CuDeviceVector, w::CuDeviceVector{T}) where {T<:AbstractFloat}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
K = size(p, 1)
if i <= length(y)
isum = zero(T)
@inbounds for k in 1:K
isum += exp(p[k, i])
end
@inbounds eval[i] = w[i] * (log(isum) - p[y[i], i])
end
return nothing
end
function EvoTrees.mlogloss(p::CuMatrix{T}, y::CuVector, w::CuVector{T}, eval::CuVector{T}; MAX_THREADS=1024, kwargs...) where {T<:AbstractFloat}
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads eval_mlogloss_kernel!(eval, p, y, w)
CUDA.synchronize()
return sum(eval) / sum(w)
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 4623 | function hist_kernel!(h∇::CuDeviceArray{T,3}, ∇::CuDeviceMatrix{S}, x_bin, is, js) where {T,S}
tix, tiy, k = threadIdx().z, threadIdx().y, threadIdx().x
bdx, bdy = blockDim().z, blockDim().y
bix, biy = blockIdx().z, blockIdx().y
gdx = gridDim().z
j = tiy + bdy * (biy - 1)
if j <= length(js)
jdx = js[j]
i_max = length(is)
niter = cld(i_max, bdx * gdx)
@inbounds for iter = 1:niter
i = tix + bdx * (bix - 1) + bdx * gdx * (iter - 1)
if i <= i_max
@inbounds idx = is[i]
@inbounds bin = x_bin[idx, jdx]
hid = Base._to_linear_index(h∇, k, bin, jdx)
CUDA.atomic_add!(pointer(h∇, hid), T(∇[k, idx]))
end
end
end
sync_threads()
return nothing
end
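# Hedged CPU reference of what hist_kernel! accumulates (illustration only):
# for each sampled row i and sampled feature j, the gradient column ∇[:, i] is
# added into the bin x_bin[i, j] of feature j; the GPU kernel does the same
# with one atomic_add! per (k, bin, feature) cell.
#   function hist_cpu!(h∇, ∇, x_bin, is, js)
#       for j in js, i in is
#           bin = x_bin[i, j]
#           @views h∇[:, bin, j] .+= ∇[:, i]
#       end
#       return nothing
#   end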
function update_hist_gpu!(h, h∇_cpu, h∇, ∇, x_bin, is, js, jsc)
kernel = @cuda launch = false hist_kernel!(h∇, ∇, x_bin, is, js)
config = launch_configuration(kernel.fun)
max_threads = config.threads
max_blocks = config.blocks
k = size(h∇, 1)
ty = max(1, min(length(js), fld(max_threads, k)))
tx = min(64, max(1, min(length(is), fld(max_threads, k * ty))))
threads = (k, ty, tx)
max_blocks = min(65535, max_blocks * fld(max_threads, prod(threads)))
by = cld(length(js), ty)
bx = min(cld(max_blocks, by), cld(length(is), tx))
blocks = (1, by, bx)
h∇ .= 0
kernel(h∇, ∇, x_bin, is, js; threads, blocks)
CUDA.synchronize()
copyto!(h∇_cpu, h∇)
Threads.@threads for j in jsc
nbins = size(h[j], 2)
@views h[j] .= h∇_cpu[:, 1:nbins, j]
end
return nothing
end
# Multi-threaded split_set!
# Takes views into the left and right placeholders. Within each chunk, left and right ids are written from the chunk start; a compaction pass then places all right ids after the left ids of the current node set.
function split_chunk_kernel!(
left::CuDeviceVector{S},
right::CuDeviceVector{S},
is::CuDeviceVector{S},
x_bin,
feat,
cond_bin,
feattype,
offset,
chunk_size,
lefts,
rights,
) where {S}
it = threadIdx().x
bid = blockIdx().x
gdim = gridDim().x
left_count = 0
right_count = 0
i = chunk_size * (bid - 1) + 1
    bsize = bid == gdim ? length(is) - chunk_size * (bid - 1) : chunk_size
i_max = i + bsize - 1
@inbounds while i <= i_max
cond = feattype ? x_bin[is[i], feat] <= cond_bin : x_bin[is[i], feat] == cond_bin
if cond
left_count += 1
left[offset+chunk_size*(bid-1)+left_count] = is[i]
else
right_count += 1
right[offset+chunk_size*(bid-1)+right_count] = is[i]
end
i += 1
end
lefts[bid] = left_count
rights[bid] = right_count
sync_threads()
return nothing
end
function EvoTrees.split_views_kernel!(
out::CuDeviceVector{S},
left::CuDeviceVector{S},
right::CuDeviceVector{S},
offset,
chunk_size,
lefts,
rights,
sum_lefts,
cumsum_lefts,
cumsum_rights,
) where {S}
bid = blockIdx().x
    cumsum_left = bid == 1 ? 0 : cumsum_lefts[bid-1]
    cumsum_right = bid == 1 ? 0 : cumsum_rights[bid-1]
iter = 1
i_max = lefts[bid]
@inbounds while iter <= i_max
out[offset+cumsum_left+iter] = left[offset+chunk_size*(bid-1)+iter]
iter += 1
end
iter = 1
i_max = rights[bid]
@inbounds while iter <= i_max
out[offset+sum_lefts+cumsum_right+iter] = right[offset+chunk_size*(bid-1)+iter]
iter += 1
end
sync_threads()
return nothing
end
function split_set_threads_gpu!(out, left, right, is, x_bin, feat, cond_bin, feattype, offset)
chunk_size = cld(length(is), min(cld(length(is), 128), 2048))
nblocks = cld(length(is), chunk_size)
lefts = CUDA.zeros(Int, nblocks)
rights = CUDA.zeros(Int, nblocks)
@cuda blocks = nblocks threads = 1 split_chunk_kernel!(
left,
right,
is,
x_bin,
feat,
cond_bin,
feattype,
offset,
chunk_size,
lefts,
rights,
)
CUDA.synchronize()
sum_lefts = sum(lefts)
cumsum_lefts = cumsum(lefts)
cumsum_rights = cumsum(rights)
@cuda blocks = nblocks threads = 1 EvoTrees.split_views_kernel!(
out,
left,
right,
offset,
chunk_size,
lefts,
rights,
sum_lefts,
cumsum_lefts,
cumsum_rights,
)
CUDA.synchronize()
return (
view(out, offset+1:offset+sum_lefts),
view(out, offset+sum_lefts+1:offset+length(is)),
)
end
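# Hedged CPU reference of the two-phase split above (illustration only):
# phase 1 (split_chunk_kernel!) partitions each chunk locally; phase 2
# (split_views_kernel!) compacts the chunks so all left ids precede all right
# ids, preserving order within each side. The net effect is equivalent to:
#   function split_set_cpu(is, x_bin, feat, cond_bin, feattype)
#       cond = feattype ? view(x_bin, is, feat) .<= cond_bin :
#                         view(x_bin, is, feat) .== cond_bin
#       return is[cond], is[.!cond]
#   end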
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 10470 | function EvoTrees.grow_evotree!(evotree::EvoTree{L,K}, cache, params::EvoTrees.EvoTypes{L}, ::Type{<:EvoTrees.GPU}) where {L,K}
# compute gradients
EvoTrees.update_grads!(cache.∇, cache.pred, cache.y, params)
# subsample rows
cache.nodes[1].is =
EvoTrees.subsample(cache.is_in, cache.is_out, cache.mask, params.rowsample, params.rng)
# subsample cols
EvoTrees.sample!(params.rng, cache.js_, cache.js, replace=false, ordered=true)
# assign a root and grow tree
tree = EvoTrees.Tree{L,K}(params.max_depth)
grow! = params.tree_type == "oblivious" ? grow_otree! : grow_tree!
grow!(
tree,
cache.nodes,
params,
cache.∇,
cache.edges,
cache.js,
cache.out,
cache.left,
cache.right,
cache.h∇_cpu,
cache.h∇,
cache.x_bin,
cache.feattypes,
cache.monotone_constraints,
)
push!(evotree.trees, tree)
EvoTrees.predict!(cache.pred, tree, cache.x_bin, cache.feattypes_gpu)
cache[:info][:nrounds] += 1
return nothing
end
# grow a single binary tree - grow through all depth
function grow_tree!(
tree::EvoTrees.Tree{L,K},
nodes::Vector{N},
params::EvoTrees.EvoTypes{L},
∇::CuMatrix,
edges,
js,
out,
left,
right,
h∇_cpu::Array{Float64,3},
h∇::CuArray{Float64,3},
x_bin::CuMatrix,
feattypes::Vector{Bool},
monotone_constraints,
) where {L,K,N}
jsg = CuVector(js)
# reset nodes
for n in nodes
n.∑ .= 0
n.gain = 0.0
@inbounds for i in eachindex(n.h)
n.h[i] .= 0
n.gains[i] .= 0
end
end
# initialize
n_current = [1]
depth = 1
# initialize summary stats
nodes[1].∑ .= Vector(vec(sum(∇[:, nodes[1].is], dims=2)))
nodes[1].gain = EvoTrees.get_gain(params, nodes[1].∑) # should use a GPU version?
# grow while there are remaining active nodes
while length(n_current) > 0 && depth <= params.max_depth
offset = 0 # identifies breakpoint for each node set within a depth
n_next = Int[]
if depth < params.max_depth
for n_id in eachindex(n_current)
n = n_current[n_id]
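                # histogram subtraction trick: every second node of the depth reuses
                # its parent's histogram (parent minus sibling) instead of launching
                # a fresh GPU accumulation; only the sibling pays the kernel cost.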
if n_id % 2 == 0
if n % 2 == 0
@inbounds for j in js
nodes[n].h[j] .= nodes[n>>1].h[j] .- nodes[n+1].h[j]
end
else
@inbounds for j in js
nodes[n].h[j] .= nodes[n>>1].h[j] .- nodes[n-1].h[j]
end
end
else
update_hist_gpu!(nodes[n].h, h∇_cpu, h∇, ∇, x_bin, nodes[n].is, jsg, js)
end
end
Threads.@threads for n ∈ sort(n_current)
EvoTrees.update_gains!(nodes[n], js, params, feattypes, monotone_constraints)
end
end
for n ∈ sort(n_current)
if depth == params.max_depth || nodes[n].∑[end] <= params.min_weight
EvoTrees.pred_leaf_cpu!(tree.pred, n, nodes[n].∑, params, ∇, nodes[n].is)
else
best = findmax(findmax.(nodes[n].gains))
best_gain = best[1][1]
best_bin = best[1][2]
best_feat = best[2]
if best_gain > nodes[n].gain + params.gamma
tree.gain[n] = best_gain - nodes[n].gain
tree.cond_bin[n] = best_bin
tree.feat[n] = best_feat
tree.cond_float[n] = edges[tree.feat[n]][tree.cond_bin[n]]
tree.split[n] = best_bin != 0
_left, _right = split_set_threads_gpu!(
out,
left,
right,
nodes[n].is,
x_bin,
tree.feat[n],
tree.cond_bin[n],
feattypes[best_feat],
offset,
)
offset += length(nodes[n].is)
nodes[n<<1].is, nodes[n<<1+1].is = _left, _right
nodes[n<<1].∑ .= nodes[n].hL[best_feat][:, best_bin]
nodes[n<<1+1].∑ .= nodes[n].hR[best_feat][:, best_bin]
nodes[n<<1].gain = EvoTrees.get_gain(params, nodes[n<<1].∑)
nodes[n<<1+1].gain = EvoTrees.get_gain(params, nodes[n<<1+1].∑)
if length(_right) >= length(_left)
push!(n_next, n << 1)
push!(n_next, n << 1 + 1)
else
push!(n_next, n << 1 + 1)
push!(n_next, n << 1)
end
else
EvoTrees.pred_leaf_cpu!(tree.pred, n, nodes[n].∑, params, ∇, nodes[n].is)
end
end
end
n_current = copy(n_next)
depth += 1
end # end of loop over active ids for a given depth
return nothing
end
# grow a single oblivious tree - grow through all depth
function grow_otree!(
tree::EvoTrees.Tree{L,K},
nodes::Vector{N},
params::EvoTrees.EvoTypes{L},
∇::CuMatrix,
edges,
js,
out,
left,
right,
h∇_cpu::Array{Float64,3},
h∇::CuArray{Float64,3},
x_bin::CuMatrix,
feattypes::Vector{Bool},
monotone_constraints,
) where {L,K,N}
jsg = CuVector(js)
# reset nodes
for n in nodes
n.∑ .= 0
n.gain = 0.0
@inbounds for i in eachindex(n.h)
n.h[i] .= 0
n.gains[i] .= 0
end
end
# initialize
n_current = [1]
depth = 1
# initialize summary stats
nodes[1].∑ .= Vector(vec(sum(∇[:, nodes[1].is], dims=2)))
nodes[1].gain = EvoTrees.get_gain(params, nodes[1].∑) # should use a GPU version?
# grow while there are remaining active nodes
while length(n_current) > 0 && depth <= params.max_depth
offset = 0 # identifies breakpoint for each node set within a depth
n_next = Int[]
min_weight_flag = false
for n in n_current
            min_weight_flag = min_weight_flag || nodes[n].∑[end] <= params.min_weight
end
if depth == params.max_depth || min_weight_flag
for n in n_current
EvoTrees.pred_leaf_cpu!(tree.pred, n, nodes[n].∑, params, ∇, nodes[n].is)
end
else
# update histograms
for n_id in eachindex(n_current)
n = n_current[n_id]
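                # histogram subtraction trick (see grow_tree!): even-indexed nodes
                # reuse the parent's histogram minus the sibling's.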
if n_id % 2 == 0
if n % 2 == 0
@inbounds for j in js
nodes[n].h[j] .= nodes[n>>1].h[j] .- nodes[n+1].h[j]
end
else
@inbounds for j in js
nodes[n].h[j] .= nodes[n>>1].h[j] .- nodes[n-1].h[j]
end
end
else
update_hist_gpu!(nodes[n].h, h∇_cpu, h∇, ∇, x_bin, nodes[n].is, jsg, js)
end
end
Threads.@threads for n ∈ n_current
EvoTrees.update_gains!(nodes[n], js, params, feattypes, monotone_constraints)
end
# initialize gains for node 1 in which all gains of a given depth will be accumulated
if depth > 1
@inbounds for j in js
nodes[1].gains[j] .= 0
end
end
gain = 0
# update gains based on the aggregation of all nodes of a given depth. One gains matrix per depth (vs one per node in binary trees).
for n ∈ sort(n_current)
if n > 1 # accumulate gains in node 1
for j in js
nodes[1].gains[j] .+= nodes[n].gains[j]
end
end
gain += nodes[n].gain
end
for n ∈ sort(n_current)
if n > 1
for j in js
                    nodes[1].gains[j] .*= nodes[n].gains[j] .> 0 # mask: drop a split's gain when any node is ineligible for it (per-leaf weight too small)
end
end
end
# find best split
best = findmax(findmax.(nodes[1].gains))
best_gain = best[1][1]
best_bin = best[1][2]
best_feat = best[2]
if best_gain > gain + params.gamma
for n in sort(n_current)
tree.gain[n] = best_gain - nodes[n].gain
tree.cond_bin[n] = best_bin
tree.feat[n] = best_feat
tree.cond_float[n] = edges[best_feat][best_bin]
tree.split[n] = best_bin != 0
_left, _right = split_set_threads_gpu!(
out,
left,
right,
nodes[n].is,
x_bin,
tree.feat[n],
tree.cond_bin[n],
feattypes[best_feat],
offset,
)
offset += length(nodes[n].is)
nodes[n<<1].is, nodes[n<<1+1].is = _left, _right
nodes[n<<1].∑ .= nodes[n].hL[best_feat][:, best_bin]
nodes[n<<1+1].∑ .= nodes[n].hR[best_feat][:, best_bin]
nodes[n<<1].gain = EvoTrees.get_gain(params, nodes[n<<1].∑)
nodes[n<<1+1].gain = EvoTrees.get_gain(params, nodes[n<<1+1].∑)
if length(_right) >= length(_left)
push!(n_next, n << 1)
push!(n_next, n << 1 + 1)
else
push!(n_next, n << 1 + 1)
push!(n_next, n << 1)
end
end
else
for n in n_current
EvoTrees.pred_leaf_cpu!(tree.pred, n, nodes[n].∑, params, ∇, nodes[n].is)
end
end
end
n_current = copy(n_next)
depth += 1
end # end of loop over current nodes for a given depth
return nothing
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 4551 | function EvoTrees.init_core(params::EvoTrees.EvoTypes{L}, ::Type{<:EvoTrees.GPU}, data, fnames, y_train, w, offset) where {L}
# binarize data into quantiles
edges, featbins, feattypes = EvoTrees.get_edges(data; fnames, nbins=params.nbins, rng=params.rng)
x_bin = CuArray(EvoTrees.binarize(data; fnames, edges))
nobs, nfeats = size(x_bin)
T = Float32
target_levels = nothing
target_isordered = false
if L == EvoTrees.Logistic
@assert eltype(y_train) <: Real && minimum(y_train) >= 0 && maximum(y_train) <= 1
K = 1
y = T.(y_train)
μ = [EvoTrees.logit(EvoTrees.mean(y))]
!isnothing(offset) && (offset .= EvoTrees.logit.(offset))
elseif L in [EvoTrees.Poisson, EvoTrees.Gamma, EvoTrees.Tweedie]
@assert eltype(y_train) <: Real
K = 1
y = T.(y_train)
μ = fill(log(EvoTrees.mean(y)), 1)
!isnothing(offset) && (offset .= log.(offset))
elseif L == EvoTrees.MLogLoss
if eltype(y_train) <: EvoTrees.CategoricalValue
target_levels = EvoTrees.CategoricalArrays.levels(y_train)
target_isordered = isordered(y_train)
y = UInt32.(EvoTrees.CategoricalArrays.levelcode.(y_train))
elseif eltype(y_train) <: Integer || eltype(y_train) <: Bool || eltype(y_train) <: String || eltype(y_train) <: Char
target_levels = sort(unique(y_train))
yc = EvoTrees.CategoricalVector(y_train, levels=target_levels)
y = UInt32.(EvoTrees.CategoricalArrays.levelcode.(yc))
else
@error "Invalid target eltype: $(eltype(y_train))"
end
K = length(target_levels)
μ = T.(log.(EvoTrees.proportions(y, UInt32(1):UInt32(K))))
μ .-= maximum(μ)
!isnothing(offset) && (offset .= log.(offset))
elseif L == EvoTrees.GaussianMLE
@assert eltype(y_train) <: Real
K = 2
y = T.(y_train)
μ = [EvoTrees.mean(y), log(EvoTrees.std(y))]
!isnothing(offset) && (offset[:, 2] .= log.(offset[:, 2]))
elseif L == EvoTrees.LogisticMLE
@assert eltype(y_train) <: Real
K = 2
y = T.(y_train)
μ = [EvoTrees.mean(y), log(EvoTrees.std(y) * sqrt(3) / π)]
!isnothing(offset) && (offset[:, 2] .= log.(offset[:, 2]))
else
@assert eltype(y_train) <: Real
K = 1
y = T.(y_train)
μ = [EvoTrees.mean(y)]
end
y = CuArray(y)
μ = T.(μ)
# force a neutral/zero bias/initial tree when offset is specified
!isnothing(offset) && (μ .= 0)
# initialize preds
pred = CUDA.zeros(T, K, nobs)
pred .= CuArray(μ)
!isnothing(offset) && (pred .+= CuArray(offset'))
# initialize gradients
h∇_cpu = zeros(Float64, 2 * K + 1, maximum(featbins), length(featbins))
h∇ = CuArray(h∇_cpu)
∇ = CUDA.zeros(T, 2 * K + 1, nobs)
@assert (length(y) == length(w) && minimum(w) > 0)
∇[end, :] .= w
# initialize indexes
is_in = CUDA.zeros(UInt32, nobs)
is_out = CUDA.zeros(UInt32, nobs)
mask = CUDA.zeros(UInt8, nobs)
js_ = UInt32.(collect(1:nfeats))
js = zeros(eltype(js_), ceil(Int, params.colsample * nfeats))
out = CUDA.zeros(UInt32, nobs)
left = CUDA.zeros(UInt32, nobs)
right = CUDA.zeros(UInt32, nobs)
# assign monotone contraints in constraints vector
monotone_constraints = zeros(Int32, nfeats)
hasproperty(params, :monotone_constraints) && for (k, v) in params.monotone_constraints
monotone_constraints[k] = v
end
# model info
info = Dict(
:fnames => fnames,
:target_levels => target_levels,
:target_isordered => target_isordered,
:edges => edges,
:featbins => featbins,
:feattypes => feattypes,
)
# initialize model
nodes = [EvoTrees.TrainNode(featbins, K, view(is_in, 1:0)) for n = 1:2^params.max_depth-1]
bias = [EvoTrees.Tree{L,K}(μ)]
m = EvoTree{L,K}(bias, info)
# build cache
cache = (
info=Dict(:nrounds => 0),
x_bin=x_bin,
y=y,
w=w,
K=K,
nodes=nodes,
pred=pred,
is_in=is_in,
is_out=is_out,
mask=mask,
js_=js_,
js=js,
out=out,
left=left,
right=right,
∇=∇,
h∇=h∇,
h∇_cpu=h∇_cpu,
fnames=fnames,
edges=edges,
featbins=featbins,
feattypes=feattypes,
feattypes_gpu=CuArray(feattypes),
monotone_constraints=monotone_constraints,
)
return m, cache
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 5870 | #####################
# MSE
#####################
function kernel_mse_∇!(∇::CuDeviceMatrix, p::CuDeviceMatrix, y::CuDeviceVector)
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
if i <= length(y)
@inbounds ∇[1, i] = 2 * (p[i] - y[i]) * ∇[3, i]
@inbounds ∇[2, i] = 2 * ∇[3, i]
end
return
end
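# Derivation (for reference): with per-observation loss w * (p - y)^2,
#   ∂/∂p   = 2w(p - y)  -> ∇[1, i]
#   ∂²/∂p² = 2w         -> ∇[2, i]
# ∇[3, i] carries the observation weight w.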
function EvoTrees.update_grads!(
∇::CuMatrix,
p::CuMatrix,
y::CuVector,
::EvoTreeRegressor{<:EvoTrees.MSE};
MAX_THREADS=1024
)
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads kernel_mse_∇!(∇, p, y)
CUDA.synchronize()
return
end
#####################
# Logistic
#####################
function kernel_logloss_∇!(∇::CuDeviceMatrix, p::CuDeviceMatrix, y::CuDeviceVector)
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
if i <= length(y)
@inbounds pred = EvoTrees.sigmoid(p[1, i])
@inbounds ∇[1, i] = (pred - y[i]) * ∇[3, i]
@inbounds ∇[2, i] = pred * (1 - pred) * ∇[3, i]
end
return
end
function EvoTrees.update_grads!(
∇::CuMatrix,
p::CuMatrix,
y::CuVector,
::EvoTreeRegressor{<:EvoTrees.LogLoss};
MAX_THREADS=1024
)
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads kernel_logloss_∇!(∇, p, y)
CUDA.synchronize()
return
end
#####################
# Poisson
#####################
function kernel_poisson_∇!(∇::CuDeviceMatrix, p::CuDeviceMatrix, y::CuDeviceVector)
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
if i <= length(y)
@inbounds pred = exp(p[1, i])
@inbounds ∇[1, i] = (pred - y[i]) * ∇[3, i]
@inbounds ∇[2, i] = pred * ∇[3, i]
end
return
end
function EvoTrees.update_grads!(
∇::CuMatrix,
p::CuMatrix,
y::CuVector,
::EvoTreeCount{<:EvoTrees.Poisson};
MAX_THREADS=1024
)
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads kernel_poisson_∇!(∇, p, y)
CUDA.synchronize()
return
end
#####################
# Gamma
#####################
function kernel_gamma_∇!(∇::CuDeviceMatrix, p::CuDeviceMatrix, y::CuDeviceVector)
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
if i <= length(y)
pred = exp(p[1, i])
@inbounds ∇[1, i] = 2 * (1 - y[i] / pred) * ∇[3, i]
@inbounds ∇[2, i] = 2 * y[i] / pred * ∇[3, i]
end
return
end
function EvoTrees.update_grads!(
∇::CuMatrix,
p::CuMatrix,
y::CuVector,
::EvoTreeRegressor{<:EvoTrees.Gamma};
MAX_THREADS=1024
)
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads kernel_gamma_∇!(∇, p, y)
CUDA.synchronize()
return
end
#####################
# Tweedie
#####################
function kernel_tweedie_∇!(∇::CuDeviceMatrix, p::CuDeviceMatrix, y::CuDeviceVector)
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
rho = eltype(p)(1.5)
if i <= length(y)
@inbounds pred = exp(p[1, i])
@inbounds ∇[1, i] = 2 * (pred^(2 - rho) - y[i] * pred^(1 - rho)) * ∇[3, i]
@inbounds ∇[2, i] =
2 * ((2 - rho) * pred^(2 - rho) - (1 - rho) * y[i] * pred^(1 - rho)) * ∇[3, i]
end
return
end
function EvoTrees.update_grads!(
∇::CuMatrix,
p::CuMatrix,
y::CuVector,
::EvoTreeRegressor{<:EvoTrees.Tweedie};
MAX_THREADS=1024
)
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads kernel_tweedie_∇!(∇, p, y)
CUDA.synchronize()
return
end
#####################
# Softmax
#####################
function kernel_mlogloss_∇!(∇::CuDeviceMatrix{T}, p::CuDeviceMatrix{T}, y::CuDeviceVector) where {T}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
K = size(p, 1)
if i <= length(y)
isum = zero(T)
@inbounds for k in 1:K
isum += exp(p[k, i])
end
@inbounds for k in 1:K
iexp = exp(p[k, i])
if k == y[i]
∇[k, i] = (iexp / isum - 1) * ∇[end, i]
else
∇[k, i] = iexp / isum * ∇[end, i]
end
∇[k+K, i] = 1 / isum * (1 - iexp / isum) * ∇[end, i]
end
end
return
end
function EvoTrees.update_grads!(
∇::CuMatrix,
p::CuMatrix,
y::CuVector,
::EvoTreeClassifier{<:EvoTrees.MLogLoss};
MAX_THREADS=1024
)
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads kernel_mlogloss_∇!(∇, p, y)
CUDA.synchronize()
return
end
################################################################################
# Gaussian - http://jrmeyer.github.io/machinelearning/2017/08/18/mle.html
# pred[i][1] = μ
# pred[i][2] = log(σ)
################################################################################
function kernel_gauss_∇!(∇::CuDeviceMatrix, p::CuDeviceMatrix, y::CuDeviceVector)
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
@inbounds if i <= length(y)
# first order gradients
∇[1, i] = (p[1, i] - y[i]) / exp(2 * p[2, i]) * ∇[5, i]
∇[2, i] = (1 - (p[1, i] - y[i])^2 / exp(2 * p[2, i])) * ∇[5, i]
# # second order gradients
∇[3, i] = ∇[5, i] / exp(2 * p[2, i])
∇[4, i] = 2 * ∇[5, i] / exp(2 * p[2, i]) * (p[1, i] - y[i])^2
end
return
end
function EvoTrees.update_grads!(
∇::CuMatrix,
p::CuMatrix,
y::CuVector,
::Union{EvoTreeGaussian{<:EvoTrees.GaussianMLE},EvoTreeMLE{<:EvoTrees.GaussianMLE}};
MAX_THREADS=1024
)
threads = min(MAX_THREADS, length(y))
blocks = cld(length(y), threads)
@cuda blocks = blocks threads = threads kernel_gauss_∇!(∇, p, y)
CUDA.synchronize()
return
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 5472 | function predict_kernel!(
::Type{L},
pred::CuDeviceMatrix{T},
split,
feats,
cond_bins,
leaf_pred,
x_bin,
feattypes,
) where {L,T}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
nid = 1
K = size(pred, 1)
@inbounds if i <= size(pred, 2)
@inbounds while split[nid]
feat = feats[nid]
cond = feattypes[feat] ? x_bin[i, feat] <= cond_bins[nid] : x_bin[i, feat] == cond_bins[nid]
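            # heap-style traversal: left child = 2*nid when cond holds, right
            # child = 2*nid + 1 otherwise; `nid << 1 + !cond` covers both cases.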
nid = nid << 1 + !cond
end
@inbounds for k = 1:K
pred[k, i] += leaf_pred[k, nid]
end
end
sync_threads()
return nothing
end
# GradientRegression
function predict_kernel!(
::Type{<:EvoTrees.GradientRegression},
pred::CuDeviceMatrix{T},
split,
feats,
cond_bins,
leaf_pred,
x_bin,
feattypes,
) where {T}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
nid = 1
@inbounds if i <= size(pred, 2)
@inbounds while split[nid]
feat = feats[nid]
cond = feattypes[feat] ? x_bin[i, feat] <= cond_bins[nid] : x_bin[i, feat] == cond_bins[nid]
nid = nid << 1 + !cond
end
pred[1, i] += leaf_pred[1, nid]
end
sync_threads()
return nothing
end
# Logistic
function predict_kernel!(
::Type{<:EvoTrees.LogLoss},
pred::CuDeviceMatrix{T},
split,
feats,
cond_bins,
leaf_pred,
x_bin,
feattypes,
) where {T}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
nid = 1
@inbounds if i <= size(pred, 2)
@inbounds while split[nid]
feat = feats[nid]
cond = feattypes[feat] ? x_bin[i, feat] <= cond_bins[nid] : x_bin[i, feat] == cond_bins[nid]
nid = nid << 1 + !cond
end
pred[1, i] = min(T(15), max(T(-15), pred[1, i] + leaf_pred[1, nid]))
end
sync_threads()
return nothing
end
# MLE2P
function predict_kernel!(
::Type{<:EvoTrees.MLE2P},
pred::CuDeviceMatrix{T},
split,
feats,
cond_bins,
leaf_pred,
x_bin,
feattypes,
) where {T}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
nid = 1
@inbounds if i <= size(pred, 2)
@inbounds while split[nid]
feat = feats[nid]
cond = feattypes[feat] ? x_bin[i, feat] <= cond_bins[nid] : x_bin[i, feat] == cond_bins[nid]
nid = nid << 1 + !cond
end
pred[1, i] += leaf_pred[1, nid]
pred[2, i] = max(T(-15), pred[2, i] + leaf_pred[2, nid])
end
sync_threads()
return nothing
end
# prediction from single tree - assign each observation to its final leaf
function EvoTrees.predict!(
pred::CuMatrix{T},
tree::EvoTrees.Tree{L,K},
x_bin::CuMatrix,
feattypes::CuVector{Bool};
MAX_THREADS=1024
) where {L,K,T}
n = size(pred, 2)
threads = min(MAX_THREADS, n)
blocks = cld(n, threads)
@cuda blocks = blocks threads = threads predict_kernel!(
L,
pred,
CuArray(tree.split),
CuArray(tree.feat),
CuArray(tree.cond_bin),
CuArray(tree.pred),
x_bin,
feattypes,
)
CUDA.synchronize()
end
function EvoTrees.predict!(
pred::CuMatrix{T},
tree::EvoTrees.Tree{L,K},
x_bin::CuMatrix,
feattypes::CuVector{Bool};
MAX_THREADS=1024
) where {L<:EvoTrees.MLogLoss,K,T}
n = size(pred, 2)
threads = min(MAX_THREADS, n)
blocks = cld(n, threads)
@cuda blocks = blocks threads = threads predict_kernel!(
L,
pred,
CuArray(tree.split),
CuArray(tree.feat),
CuArray(tree.cond_bin),
CuArray(tree.pred),
x_bin,
feattypes,
)
CUDA.synchronize()
pred .= max.(T(-15), pred .- maximum(pred, dims=1))
end
# prediction for EvoTree model
function predict(
m::EvoTree{L,K},
data,
::Type{<:EvoTrees.GPU};
ntree_limit=length(m.trees)) where {L,K}
    data = Tables.istable(data) ? Tables.columntable(data) : data
ntrees = length(m.trees)
ntree_limit > ntrees && error("ntree_limit is larger than number of trees $ntrees.")
x_bin = CuArray(EvoTrees.binarize(data; fnames=m.info[:fnames], edges=m.info[:edges]))
nobs = size(x_bin, 1)
pred = CUDA.zeros(K, nobs)
feattypes = CuArray(m.info[:feattypes])
for i = 1:ntree_limit
EvoTrees.predict!(pred, m.trees[i], x_bin, feattypes)
end
if L == EvoTrees.LogLoss
pred .= EvoTrees.sigmoid.(pred)
elseif L ∈ [EvoTrees.Poisson, EvoTrees.Gamma, EvoTrees.Tweedie]
pred .= exp.(pred)
elseif L in [EvoTrees.GaussianMLE, EvoTrees.LogisticMLE]
pred[2, :] .= exp.(pred[2, :])
elseif L == EvoTrees.MLogLoss
EvoTrees.softmax!(pred)
end
pred = K == 1 ? vec(Array(pred')) : Array(pred')
return pred
end
function softmax_kernel!(p::CuDeviceMatrix{T}) where {T}
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
K, nobs = size(p)
if i <= nobs
isum = zero(T)
@inbounds for k in 1:K
            p[k, i] = exp(p[k, i])
            isum += p[k, i]
end
@inbounds for k in 1:K
p[k, i] /= isum
end
end
return nothing
end
function EvoTrees.softmax!(p::CuMatrix{T}; MAX_THREADS=1024) where {T}
K, nobs = size(p)
threads = min(MAX_THREADS, nobs)
blocks = cld(nobs, threads)
@cuda blocks = blocks threads = threads softmax_kernel!(p)
CUDA.synchronize()
return nothing
end
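# Hedged usage sketch (illustration only): in-place softmax over the class
# dimension of a K x nobs prediction matrix.
#   p = CUDA.rand(Float32, 3, 1_000)   # 3 classes, 1_000 observations
#   EvoTrees.softmax!(p)               # each column of p now sums to 1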
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2244 | function get_rand_kernel!(mask)
tix = threadIdx().x
bdx = blockDim().x
bix = blockIdx().x
gdx = gridDim().x
i_max = length(mask)
niter = cld(i_max, bdx * gdx)
for iter = 1:niter
i = tix + bdx * (bix - 1) + bdx * gdx * (iter - 1)
if i <= i_max
mask[i] = rand(UInt8)
end
end
sync_threads()
end
function get_rand_gpu!(mask)
threads = (1024,)
blocks = (256,)
@cuda threads = threads blocks = blocks get_rand_kernel!(mask)
CUDA.synchronize()
end
function subsample_step_1_kernel(is_in, mask, cond, counts, chunk_size)
bid = blockIdx().x
gdim = gridDim().x
i_start = chunk_size * (bid - 1) + 1
i_stop = bid == gdim ? length(is_in) : i_start + chunk_size - 1
count = 0
@inbounds for i = i_start:i_stop
@inbounds if mask[i] <= cond
is_in[i_start+count] = i
count += 1
end
end
sync_threads()
@inbounds counts[bid] = count
sync_threads()
end
function subsample_step_2_kernel(is_in, is_out, counts, counts_cum, chunk_size)
bid = blockIdx().x
count_cum = counts_cum[bid]
i_start = chunk_size * (bid - 1)
@inbounds for i = 1:counts[bid]
is_out[count_cum+i] = is_in[i_start+i]
end
sync_threads()
end
function EvoTrees.subsample(is_in::CuVector, is_out::CuVector, mask::CuVector, rowsample::AbstractFloat, rng)
get_rand_gpu!(mask)
cond = round(UInt8, 255 * rowsample)
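    # mask holds uniform bytes in 0:255, so P(mask[i] <= cond) ≈ rowsample
    # when cond = round(UInt8, 255 * rowsample); sampled rows keep their order.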
chunk_size = cld(length(is_in), min(cld(length(is_in), 128), 2048))
nblocks = cld(length(is_in), chunk_size)
counts = CUDA.zeros(Int, nblocks)
@cuda blocks = nblocks threads = 1 subsample_step_1_kernel(
is_in,
mask,
cond,
counts,
chunk_size,
)
CUDA.synchronize()
counts_cum = cumsum(counts) - counts
@cuda blocks = nblocks threads = 1 subsample_step_2_kernel(
is_in,
is_out,
counts,
counts_cum,
chunk_size,
)
CUDA.synchronize()
counts_sum = sum(counts)
    if counts_sum == 0
        @error "no observations were sampled - choose a larger rowsample"
else
return view(is_out, 1:counts_sum)
end
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 1148 | module EvoTrees
export fit_evotree
export EvoTreeRegressor,
EvoTreeCount,
EvoTreeClassifier,
EvoTreeMLE,
EvoTreeGaussian,
EvoTree,
Random
using Base.Threads: @threads, @spawn, nthreads, threadid
using Statistics
using StatsBase: sample, sample!, quantile, proportions
using Random
using Random: seed!, AbstractRNG
using Distributions
using Tables
using CategoricalArrays
using BSON
using NetworkLayout
using RecipesBase
using MLJModelInterface
import MLJModelInterface as MMI
import MLJModelInterface: fit, update, predict, schema
import Base: convert
include("models.jl")
include("structs.jl")
include("loss.jl")
include("eval.jl")
include("predict.jl")
include("init.jl")
include("subsample.jl")
include("fit-utils.jl")
include("fit.jl")
if !isdefined(Base, :get_extension)
include("../ext/EvoTreesCUDAExt/EvoTreesCUDAExt.jl")
end
include("callback.jl")
include("importance.jl")
include("plot.jl")
include("MLJ.jl")
function save(model::EvoTree, path)
BSON.bson(path, Dict(:model => model))
end
function load(path)
m = BSON.load(path, @__MODULE__)
return m[:model]
end
end # module
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 33537 | function MMI.fit(model::EvoTypes, verbosity::Int, A, y, w=nothing)
A = isa(A, AbstractMatrix) ? Tables.columntable(Tables.table(A)) : Tables.columntable(A)
nobs = Tables.DataAPI.nrow(A)
fnames = Tables.schema(A).names
w = isnothing(w) ? device_ones(CPU, Float32, nobs) : Vector{Float32}(w)
fitresult, cache = init_core(model, CPU, A, fnames, y, w, nothing)
while cache[:info][:nrounds] < model.nrounds
grow_evotree!(fitresult, cache, model)
end
report = (features=cache[:fnames],)
return fitresult, cache, report
end
function okay_to_continue(model, fitresult, cache)
return model.nrounds - cache[:info][:nrounds] >= 0 &&
all(_get_struct_loss(model) .== _get_struct_loss(fitresult))
end
# For EarlyStopping.jl support
MMI.iteration_parameter(::Type{<:EvoTypes}) = :nrounds
function MMI.update(
model::EvoTypes,
verbosity::Integer,
fitresult,
cache,
A,
y,
w=nothing,
)
if okay_to_continue(model, fitresult, cache)
while cache[:info][:nrounds] < model.nrounds
grow_evotree!(fitresult, cache, model)
end
report = (features=cache[:fnames],)
else
fitresult, cache, report = fit(model, verbosity, A, y, w)
end
return fitresult, cache, report
end
function predict(::EvoTreeRegressor, fitresult, A)
pred = predict(fitresult, A)
return pred
end
function predict(::EvoTreeClassifier, fitresult, A)
pred = predict(fitresult, A)
return MMI.UnivariateFinite(fitresult.info[:target_levels], pred, pool=missing, ordered=fitresult.info[:target_isordered])
end
function predict(::EvoTreeCount, fitresult, A)
λs = predict(fitresult, A)
return [Distributions.Poisson(λ) for λ ∈ λs]
end
function predict(::EvoTreeGaussian, fitresult, A)
pred = predict(fitresult, A)
return [Distributions.Normal(pred[i, 1], pred[i, 2]) for i in axes(pred, 1)]
end
function predict(::EvoTreeMLE{L}, fitresult, A) where {L<:GaussianMLE}
pred = predict(fitresult, A)
return [Distributions.Normal(pred[i, 1], pred[i, 2]) for i in axes(pred, 1)]
end
function predict(::EvoTreeMLE{L}, fitresult, A) where {L<:LogisticMLE}
pred = predict(fitresult, A)
return [Distributions.Logistic(pred[i, 1], pred[i, 2]) for i in axes(pred, 1)]
end
# Feature Importances
MMI.reports_feature_importances(::Type{<:EvoTypes}) = true
MMI.supports_weights(::Type{<:EvoTypes}) = true
function MMI.feature_importances(m::EvoTypes, fitresult, report)
fi_pairs = importance(fitresult, fnames=report[:features])
return fi_pairs
end
# Metadata
MMI.metadata_pkg.(
(EvoTreeRegressor, EvoTreeClassifier, EvoTreeCount, EvoTreeGaussian, EvoTreeMLE),
name="EvoTrees",
uuid="f6006082-12f8-11e9-0c9c-0d5d367ab1e5",
url="https://github.com/Evovest/EvoTrees.jl",
julia=true,
license="Apache",
is_wrapper=false,
)
MMI.metadata_model(
EvoTreeRegressor,
input_scitype=Union{
MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor, MMI.Multiclass),
AbstractMatrix{MMI.Continuous},
},
target_scitype=AbstractVector{<:MMI.Continuous},
weights=true,
path="EvoTrees.EvoTreeRegressor",
)
MMI.metadata_model(
EvoTreeClassifier,
input_scitype=Union{
MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor, MMI.Multiclass),
AbstractMatrix{MMI.Continuous},
},
target_scitype=AbstractVector{<:MMI.Finite},
weights=true,
path="EvoTrees.EvoTreeClassifier",
)
MMI.metadata_model(
EvoTreeCount,
input_scitype=Union{
MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor, MMI.Multiclass),
AbstractMatrix{MMI.Continuous},
},
target_scitype=AbstractVector{<:MMI.Count},
weights=true,
path="EvoTrees.EvoTreeCount",
)
MMI.metadata_model(
EvoTreeGaussian,
input_scitype=Union{
MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor, MMI.Multiclass),
AbstractMatrix{MMI.Continuous},
},
target_scitype=AbstractVector{<:MMI.Continuous},
weights=true,
path="EvoTrees.EvoTreeGaussian",
)
MMI.metadata_model(
EvoTreeMLE,
input_scitype=Union{
MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor, MMI.Multiclass),
AbstractMatrix{MMI.Continuous},
},
target_scitype=AbstractVector{<:MMI.Continuous},
weights=true,
path="EvoTrees.EvoTreeMLE",
)
"""
EvoTreeRegressor(;kwargs...)
A model type for constructing an EvoTreeRegressor, based on [EvoTrees.jl](https://github.com/Evovest/EvoTrees.jl), and implementing both an internal API and the MLJ model interface.
# Hyper-parameters
- `loss=:mse`: Loss to be minimized during training. One of:
- `:mse`
- `:logloss`
- `:gamma`
- `:tweedie`
- `:quantile`
- `:l1`
- `nrounds=100`: Number of rounds. It corresponds to the number of trees that will be sequentially stacked. Must be >= 1.
- `eta=0.1`: Learning rate. Each tree's raw predictions are scaled by `eta` prior to being added to the stack of predictions. Must be > 0.
A lower `eta` results in slower learning, requiring a higher `nrounds` but typically improves model performance.
- `L2::T=0.0`: L2 regularization factor on aggregate gain. Must be >= 0. Higher L2 can result in a more robust model.
- `lambda::T=0.0`: L2 regularization factor on individual gain. Must be >= 0. Higher lambda can result in a more robust model.
- `gamma::T=0.0`: Minimum gain improvement needed to perform a node split. Higher gamma can result in a more robust model. Must be >= 0.
- `alpha::T=0.5`: Loss specific parameter in the [0, 1] range:
- `:quantile`: target quantile for the regression.
- `:l1`: weighting parameters to positive vs negative residuals.
- Positive residual weights = `alpha`
- Negative residual weights = `(1 - alpha)`
- `max_depth=6`: Maximum depth of a tree. Must be >= 1. A tree of depth 1 is made of a single prediction leaf.
A complete tree of depth N contains `2^(N - 1)` terminal leaves and `2^(N - 1) - 1` split nodes.
Compute cost is proportional to `2^max_depth`. Typical optimal values are in the 3 to 9 range.
- `min_weight=1.0`: Minimum weight needed in a node to perform a split. Matches the number of observations by default or the sum of weights as provided by the `weights` vector. Must be > 0.
- `rowsample=1.0`: Proportion of rows that are sampled at each iteration to build the tree. Should be in `]0, 1]`.
- `colsample=1.0`: Proportion of columns / features that are sampled at each iteration to build the tree. Should be in `]0, 1]`.
- `nbins=64`: Number of bins into which each feature is quantized. Buckets are defined based on quantiles, hence resulting in equal weight bins. Should be between 2 and 255.
- `monotone_constraints=Dict{Int, Int}()`: Specify monotonic constraints using a dict where the key is the feature index and the value the applicable constraint (-1=decreasing, 0=none, 1=increasing).
  Only `:linear`, `:logistic`, `:gamma` and `:tweedie` losses are supported at the moment.
- `tree_type="binary"` Tree structure to be used. One of:
  - `binary`: Each node of a tree is grown independently. Trees are built depthwise until max depth is reached or until min weight or gain (see `gamma`) stops further node splits.
- `oblivious`: A common splitting condition is imposed to all nodes of a given depth.
- `rng=123`: Either an integer used as a seed to the random number generator or an actual random number generator (`::Random.AbstractRNG`).
# Internal API
Do `config = EvoTreeRegressor()` to construct an instance with default hyper-parameters.
Provide keyword arguments to override hyper-parameter defaults, as in EvoTreeRegressor(loss=...).
## Training model
A model is built using [`fit_evotree`](@ref):
```julia
model = fit_evotree(config; x_train, y_train, kwargs...)
```
## Inference
Predictions are obtained using [`predict`](@ref) which returns a `Vector` of length `nobs`:
```julia
EvoTrees.predict(model, X)
```
Alternatively, models act as a functor, returning predictions when called as a function with features as argument:
```julia
model(X)
```
# MLJ Interface
From MLJ, the type can be imported using:
```julia
EvoTreeRegressor = @load EvoTreeRegressor pkg=EvoTrees
```
Do `model = EvoTreeRegressor()` to construct an instance with default hyper-parameters.
Provide keyword arguments to override hyper-parameter defaults, as in `EvoTreeRegressor(loss=...)`.
## Training model
In MLJ or MLJBase, bind an instance `model` to data with
`mach = machine(model, X, y)` where
- `X`: any table of input features (eg, a `DataFrame`) whose columns
each have one of the following element scitypes: `Continuous`,
`Count`, or `<:OrderedFactor`; check column scitypes with `schema(X)`
- `y`: is the target, which can be any `AbstractVector` whose element
scitype is `<:Continuous`; check the scitype
with `scitype(y)`
Train the machine using `fit!(mach, rows=...)`.
## Operations
- `predict(mach, Xnew)`: return predictions of the target given
features `Xnew` having the same scitype as `X` above. Predictions
are deterministic.
## Fitted parameters
The fields of `fitted_params(mach)` are:
- `:fitresult`: The `GBTree` object returned by EvoTrees.jl fitting algorithm.
## Report
The fields of `report(mach)` are:
- `:features`: The names of the features encountered in training.
# Examples
```
# Internal API
using EvoTrees
config = EvoTreeRegressor(max_depth=5, nbins=32, nrounds=100)
nobs, nfeats = 1_000, 5
x_train, y_train = randn(nobs, nfeats), rand(nobs)
model = fit_evotree(config; x_train, y_train)
preds = EvoTrees.predict(model, x_train)
```
```
# MLJ Interface
using MLJ
EvoTreeRegressor = @load EvoTreeRegressor pkg=EvoTrees
model = EvoTreeRegressor(max_depth=5, nbins=32, nrounds=100)
X, y = @load_boston
mach = machine(model, X, y) |> fit!
preds = predict(mach, X)
```
"""
EvoTreeRegressor
"""
EvoTreeClassifier(;kwargs...)
A model type for constructing an EvoTreeClassifier, based on [EvoTrees.jl](https://github.com/Evovest/EvoTrees.jl), and implementing both an internal API and the MLJ model interface.
EvoTreeClassifier is used to perform multi-class classification, using cross-entropy loss.
# Hyper-parameters
- `nrounds=100`: Number of rounds. It corresponds to the number of trees that will be sequentially stacked. Must be >= 1.
- `eta=0.1`: Learning rate. Each tree's raw predictions are scaled by `eta` prior to being added to the stack of predictions. Must be > 0.
A lower `eta` results in slower learning, requiring a higher `nrounds` but typically improves model performance.
- `L2::T=0.0`: L2 regularization factor on aggregate gain. Must be >= 0. Higher L2 can result in a more robust model.
- `lambda::T=0.0`: L2 regularization factor on individual gain. Must be >= 0. Higher lambda can result in a more robust model.
- `gamma::T=0.0`: Minimum gain improvement needed to perform a node split. Higher gamma can result in a more robust model. Must be >= 0.
- `max_depth=6`: Maximum depth of a tree. Must be >= 1. A tree of depth 1 is made of a single prediction leaf.
A complete tree of depth N contains `2^(N - 1)` terminal leaves and `2^(N - 1) - 1` split nodes.
Compute cost is proportional to `2^max_depth`. Typical optimal values are in the 3 to 9 range.
- `min_weight=1.0`: Minimum weight needed in a node to perform a split. Matches the number of observations by default or the sum of weights as provided by the `weights` vector. Must be > 0.
- `rowsample=1.0`: Proportion of rows that are sampled at each iteration to build the tree. Should be in `]0, 1]`.
- `colsample=1.0`: Proportion of columns / features that are sampled at each iteration to build the tree. Should be in `]0, 1]`.
- `nbins=64`: Number of bins into which each feature is quantized. Buckets are defined based on quantiles, hence resulting in equal weight bins. Should be between 2 and 255.
- `tree_type="binary"` Tree structure to be used. One of:
  - `binary`: Each node of a tree is grown independently. Trees are built depthwise until max depth is reached or until min weight or gain (see `gamma`) stops further node splits.
- `oblivious`: A common splitting condition is imposed to all nodes of a given depth.
- `rng=123`: Either an integer used as a seed to the random number generator or an actual random number generator (`::Random.AbstractRNG`).
# Internal API
Do `config = EvoTreeClassifier()` to construct an instance with default hyper-parameters.
Provide keyword arguments to override hyper-parameter defaults, as in EvoTreeClassifier(max_depth=...).
## Training model
A model is built using [`fit_evotree`](@ref):
```julia
model = fit_evotree(config; x_train, y_train, kwargs...)
```
## Inference
Predictions are obtained using [`predict`](@ref) which returns a `Matrix` of size `[nobs, K]` where `K` is the number of classes:
```julia
EvoTrees.predict(model, X)
```
Alternatively, models act as a functor, returning predictions when called as a function with features as argument:
```julia
model(X)
```
# MLJ
From MLJ, the type can be imported using:
```julia
EvoTreeClassifier = @load EvoTreeClassifier pkg=EvoTrees
```
Do `model = EvoTreeClassifier()` to construct an instance with default hyper-parameters.
Provide keyword arguments to override hyper-parameter defaults, as in `EvoTreeClassifier(loss=...)`.
## Training data
In MLJ or MLJBase, bind an instance `model` to data with
mach = machine(model, X, y)
where
- `X`: any table of input features (eg, a `DataFrame`) whose columns
each have one of the following element scitypes: `Continuous`,
`Count`, or `<:OrderedFactor`; check column scitypes with `schema(X)`
- `y`: is the target, which can be any `AbstractVector` whose element
  scitype is `<:Multiclass` or `<:OrderedFactor`; check the scitype
with `scitype(y)`
Train the machine using `fit!(mach, rows=...)`.
## Operations
- `predict(mach, Xnew)`: return predictions of the target given features `Xnew` having the same scitype as `X` above.
Predictions are probabilistic.
- `predict_mode(mach, Xnew)`: returns the mode of each of the prediction above.
## Fitted parameters
The fields of `fitted_params(mach)` are:
- `:fitresult`: The `GBTree` object returned by EvoTrees.jl fitting algorithm.
## Report
The fields of `report(mach)` are:
- `:features`: The names of the features encountered in training.
# Examples
```
# Internal API
using EvoTrees
config = EvoTreeClassifier(max_depth=5, nbins=32, nrounds=100)
nobs, nfeats = 1_000, 5
x_train, y_train = randn(nobs, nfeats), rand(1:3, nobs)
model = fit_evotree(config; x_train, y_train)
preds = EvoTrees.predict(model, x_train)
```
```
# MLJ Interface
using MLJ
EvoTreeClassifier = @load EvoTreeClassifier pkg=EvoTrees
model = EvoTreeClassifier(max_depth=5, nbins=32, nrounds=100)
X, y = @load_iris
mach = machine(model, X, y) |> fit!
preds = predict(mach, X)
preds = predict_mode(mach, X)
```
See also
[EvoTrees.jl](https://github.com/Evovest/EvoTrees.jl).
"""
EvoTreeClassifier
"""
EvoTreeCount(;kwargs...)
A model type for constructing an EvoTreeCount, based on [EvoTrees.jl](https://github.com/Evovest/EvoTrees.jl), and implementing both an internal API and the MLJ model interface.
EvoTreeCount is used to perform Poisson probabilistic regression on count target.
# Hyper-parameters
- `nrounds=100`: Number of rounds. It corresponds to the number of trees that will be sequentially stacked. Must be >= 1.
- `eta=0.1`: Learning rate. Each tree's raw predictions are scaled by `eta` prior to being added to the stack of predictions. Must be > 0.
A lower `eta` results in slower learning, requiring a higher `nrounds` but typically improves model performance.
- `L2::T=0.0`: L2 regularization factor on aggregate gain. Must be >= 0. Higher L2 can result in a more robust model.
- `lambda::T=0.0`: L2 regularization factor on individual gain. Must be >= 0. Higher lambda can result in a more robust model.
- `gamma::T=0.0`: Minimum gain improvement needed to perform a node split. Higher gamma can result in a more robust model. Must be >= 0.
- `max_depth=6`: Maximum depth of a tree. Must be >= 1. A tree of depth 1 is made of a single prediction leaf.
A complete tree of depth N contains `2^(N - 1)` terminal leaves and `2^(N - 1) - 1` split nodes.
Compute cost is proportional to 2^max_depth. Typical optimal values are in the 3 to 9 range.
- `min_weight=1.0`: Minimum weight needed in a node to perform a split. Matches the number of observations by default or the sum of weights as provided by the `weights` vector. Must be > 0.
- `rowsample=1.0`: Proportion of rows that are sampled at each iteration to build the tree. Should be in `]0, 1]`.
- `colsample=1.0`: Proportion of columns / features that are sampled at each iteration to build the tree. Should be in `]0, 1]`.
- `nbins=64`: Number of bins into which each feature is quantized. Buckets are defined based on quantiles, hence resulting in equal weight bins. Should be between 2 and 255.
- `monotone_constraints=Dict{Int, Int}()`: Specify monotonic constraints using a dict where the key is the feature index and the value the applicable constraint (-1=decreasing, 0=none, 1=increasing).
- `tree_type="binary"` Tree structure to be used. One of:
  - `binary`: Each node of a tree is grown independently. Trees are built depthwise until max depth is reached or until min weight or gain (see `gamma`) stops further node splits.
- `oblivious`: A common splitting condition is imposed to all nodes of a given depth.
- `rng=123`: Either an integer used as a seed to the random number generator or an actual random number generator (`::Random.AbstractRNG`).
# Internal API
Do `config = EvoTreeCount()` to construct an instance with default hyper-parameters.
Provide keyword arguments to override hyper-parameter defaults, as in EvoTreeCount(max_depth=...).
## Training model
A model is built using [`fit_evotree`](@ref):
```julia
model = fit_evotree(config; x_train, y_train, kwargs...)
```
## Inference
Predictions are obtained using [`predict`](@ref) which returns a `Vector` of length `nobs`:
```julia
EvoTrees.predict(model, X)
```
Alternatively, models act as a functor, returning predictions when called as a function with features as argument:
```julia
model(X)
```
# MLJ
From MLJ, the type can be imported using:
```julia
EvoTreeCount = @load EvoTreeCount pkg=EvoTrees
```
Do `model = EvoTreeCount()` to construct an instance with default hyper-parameters.
Provide keyword arguments to override hyper-parameter defaults, as in `EvoTreeCount(loss=...)`.
## Training data
In MLJ or MLJBase, bind an instance `model` to data with
mach = machine(model, X, y)
where
- `X`: any table of input features (eg, a `DataFrame`) whose columns
each have one of the following element scitypes: `Continuous`,
`Count`, or `<:OrderedFactor`; check column scitypes with `schema(X)`
- `y`: is the target, which can be any `AbstractVector` whose element
scitype is `<:Count`; check the scitype
with `scitype(y)`
Train the machine using `fit!(mach, rows=...)`.
# Operations
- `predict(mach, Xnew)`: returns a vector of Poisson distributions given features `Xnew`
having the same scitype as `X` above. Predictions are probabilistic.
Specific metrics can also be predicted using:
- `predict_mean(mach, Xnew)`
- `predict_mode(mach, Xnew)`
- `predict_median(mach, Xnew)`
## Fitted parameters
The fields of `fitted_params(mach)` are:
- `:fitresult`: The `GBTree` object returned by EvoTrees.jl fitting algorithm.
## Report
The fields of `report(mach)` are:
- `:features`: The names of the features encountered in training.
# Examples
```
# Internal API
using EvoTrees
config = EvoTreeCount(max_depth=5, nbins=32, nrounds=100)
nobs, nfeats = 1_000, 5
x_train, y_train = randn(nobs, nfeats), rand(0:2, nobs)
model = fit_evotree(config; x_train, y_train)
preds = EvoTrees.predict(model, x_train)
```
```
using MLJ
EvoTreeCount = @load EvoTreeCount pkg=EvoTrees
model = EvoTreeCount(max_depth=5, nbins=32, nrounds=100)
nobs, nfeats = 1_000, 5
X, y = randn(nobs, nfeats), rand(0:2, nobs)
mach = machine(model, X, y) |> fit!
preds = predict(mach, X)
preds = predict_mean(mach, X)
preds = predict_mode(mach, X)
preds = predict_median(mach, X)
```
See also
[EvoTrees.jl](https://github.com/Evovest/EvoTrees.jl).
"""
EvoTreeCount
"""
EvoTreeGaussian(;kwargs...)
A model type for constructing an EvoTreeGaussian, based on [EvoTrees.jl](https://github.com/Evovest/EvoTrees.jl), and implementing both an internal API and the MLJ model interface.
EvoTreeGaussian is used to perform Gaussian probabilistic regression, fitting μ and σ parameters to maximize likelihood.
# Hyper-parameters
- `nrounds=100`: Number of rounds. It corresponds to the number of trees that will be sequentially stacked. Must be >= 1.
- `eta=0.1`: Learning rate. Each tree's raw predictions are scaled by `eta` prior to being added to the stack of predictions. Must be > 0.
A lower `eta` results in slower learning, requiring a higher `nrounds` but typically improves model performance.
- `L2::T=0.0`: L2 regularization factor on aggregate gain. Must be >= 0. Higher L2 can result in a more robust model.
- `lambda::T=0.0`: L2 regularization factor on individual gain. Must be >= 0. Higher lambda can result in a more robust model.
- `gamma::T=0.0`: Minimum gain improvement needed to perform a node split. Higher gamma can result in a more robust model. Must be >= 0.
- `max_depth=6`: Maximum depth of a tree. Must be >= 1. A tree of depth 1 is made of a single prediction leaf.
A complete tree of depth N contains `2^(N - 1)` terminal leaves and `2^(N - 1) - 1` split nodes.
Compute cost is proportional to 2^max_depth. Typical optimal values are in the 3 to 9 range.
- `min_weight=8.0`: Minimum weight needed in a node to perform a split. Matches the number of observations by default or the sum of weights as provided by the `weights` vector. Must be > 0.
- `rowsample=1.0`: Proportion of rows that are sampled at each iteration to build the tree. Should be in `]0, 1]`.
- `colsample=1.0`: Proportion of columns / features that are sampled at each iteration to build the tree. Should be in `]0, 1]`.
- `nbins=64`: Number of bins into which each feature is quantized. Buckets are defined based on quantiles, hence resulting in equal weight bins. Should be between 2 and 255.
- `monotone_constraints=Dict{Int, Int}()`: Specify monotonic constraints using a dict where the key is the feature index and the value the applicable constraint (-1=decreasing, 0=none, 1=increasing).
  !Experimental feature: note that for Gaussian regression, constraints may not be enforced systematically.
- `tree_type="binary"` Tree structure to be used. One of:
  - `binary`: Each node of a tree is grown independently. Trees are built depthwise until max depth is reached or until min weight or gain (see `gamma`) stops further node splits.
- `oblivious`: A common splitting condition is imposed to all nodes of a given depth.
- `rng=123`: Either an integer used as a seed to the random number generator or an actual random number generator (`::Random.AbstractRNG`).
# Internal API
Do `config = EvoTreeGaussian()` to construct an instance with default hyper-parameters.
Provide keyword arguments to override hyper-parameter defaults, as in EvoTreeGaussian(max_depth=...).
## Training model
A model is built using [`fit_evotree`](@ref):
```julia
model = fit_evotree(config; x_train, y_train, kwargs...)
```
## Inference
Predictions are obtained using [`predict`](@ref) which returns a `Matrix` of size `[nobs, 2]` where the second dimensions refer to `μ` and `σ` respectively:
```julia
EvoTrees.predict(model, X)
```
Alternatively, models act as a functor, returning predictions when called as a function with features as argument:
```julia
model(X)
```
# MLJ
From MLJ, the type can be imported using:
```julia
EvoTreeGaussian = @load EvoTreeGaussian pkg=EvoTrees
```
Do `model = EvoTreeGaussian()` to construct an instance with default hyper-parameters.
Provide keyword arguments to override hyper-parameter defaults, as in `EvoTreeGaussian(loss=...)`.
## Training data
In MLJ or MLJBase, bind an instance `model` to data with
mach = machine(model, X, y)
where
- `X`: any table of input features (eg, a `DataFrame`) whose columns
each have one of the following element scitypes: `Continuous`,
`Count`, or `<:OrderedFactor`; check column scitypes with `schema(X)`
- `y`: is the target, which can be any `AbstractVector` whose element
scitype is `<:Continuous`; check the scitype
with `scitype(y)`
Train the machine using `fit!(mach, rows=...)`.
## Operations
- `predict(mach, Xnew)`: returns a vector of Gaussian distributions given features `Xnew` having the same scitype as `X` above.
Predictions are probabilistic.
Specific metrics can also be predicted using:
- `predict_mean(mach, Xnew)`
- `predict_mode(mach, Xnew)`
- `predict_median(mach, Xnew)`
## Fitted parameters
The fields of `fitted_params(mach)` are:
- `:fitresult`: The `GBTree` object returned by the EvoTrees.jl fitting algorithm.
## Report
The fields of `report(mach)` are:
- `:features`: The names of the features encountered in training.
# Examples
```
# Internal API
using EvoTrees
params = EvoTreeGaussian(max_depth=5, nbins=32, nrounds=100)
nobs, nfeats = 1_000, 5
x_train, y_train = randn(nobs, nfeats), rand(nobs)
model = fit_evotree(params; x_train, y_train)
preds = EvoTrees.predict(model, x_train)
```
```
# MLJ Interface
using MLJ
EvoTreeGaussian = @load EvoTreeGaussian pkg=EvoTrees
model = EvoTreeGaussian(max_depth=5, nbins=32, nrounds=100)
X, y = @load_boston
mach = machine(model, X, y) |> fit!
preds = predict(mach, X)
preds = predict_mean(mach, X)
preds = predict_mode(mach, X)
preds = predict_median(mach, X)
```
"""
EvoTreeGaussian
"""
EvoTreeMLE(;kwargs...)
A model type for constructing an EvoTreeMLE, based on [EvoTrees.jl](https://github.com/Evovest/EvoTrees.jl), and implementing both an internal API and the MLJ model interface.
EvoTreeMLE performs maximum likelihood estimation. Assumed distribution is specified through `loss` kwargs. Both Gaussian and Logistic distributions are supported.
# Hyper-parameters
- `loss=:gaussian_mle`: Loss to be minimized during training. One of:
- `:gaussian` / `:gaussian_mle`
- `:logistic` / `:logistic_mle`
- `nrounds=100`: Number of rounds. It corresponds to the number of trees that will be sequentially stacked. Must be >= 1.
- `eta=0.1`: Learning rate. Each tree's raw predictions are scaled by `eta` prior to being added to the stack of predictions. Must be > 0.
A lower `eta` results in slower learning, requiring a higher `nrounds`, but typically improves model performance.
- `L2::T=0.0`: L2 regularization factor on aggregate gain. Must be >= 0. Higher L2 can result in a more robust model.
- `lambda::T=0.0`: L2 regularization factor on individual gain. Must be >= 0. Higher lambda can result in a more robust model.
- `gamma::T=0.0`: Minimum gain improvement needed to perform a node split. Higher gamma can result in a more robust model. Must be >= 0.
- `max_depth=6`: Maximum depth of a tree. Must be >= 1. A tree of depth 1 is made of a single prediction leaf.
A complete tree of depth N contains `2^(N - 1)` terminal leaves and `2^(N - 1) - 1` split nodes.
Compute cost is proportional to 2^max_depth. Typical optimal values are in the 3 to 9 range.
- `min_weight=8.0`: Minimum weight needed in a node to perform a split. Matches the number of observations by default or the sum of weights as provided by the `weights` vector. Must be > 0.
- `rowsample=1.0`: Proportion of rows that are sampled at each iteration to build the tree. Should be in `]0, 1]`.
- `colsample=1.0`: Proportion of columns / features that are sampled at each iteration to build the tree. Should be in `]0, 1]`.
- `nbins=64`: Number of bins into which each feature is quantized. Buckets are defined based on quantiles, hence resulting in equal weight bins. Should be between 2 and 255.
- `monotone_constraints=Dict{Int, Int}()`: Specify monotonic constraints using a dict where the key is the feature index and the value the applicable constraint (-1=decreasing, 0=none, 1=increasing).
!Experimental feature: note that for MLE regression, constraints may not be enforced systematically.
- `tree_type="binary"` Tree structure to be used. One of:
- `binary`: Each node of a tree is grown independently. Trees are built depthwise until the maximum depth is reached, or until the minimum weight or gain criteria (see `gamma`) stop further node splits.
- `oblivious`: A common splitting condition is imposed to all nodes of a given depth.
- `rng=123`: Either an integer used as a seed to the random number generator or an actual random number generator (`::Random.AbstractRNG`).
# Internal API
Do `config = EvoTreeMLE()` to construct an instance with default hyper-parameters.
Provide keyword arguments to override hyper-parameter defaults, as in `EvoTreeMLE(max_depth=...)`.
## Training model
A model is built using [`fit_evotree`](@ref):
```julia
model = fit_evotree(config; x_train, y_train, kwargs...)
```
## Inference
Predictions are obtained using [`predict`](@ref) which returns a `Matrix` of size `[nobs, nparams]` where the second dimension refers to `μ` & `σ` for Normal/Gaussian, and `μ` & `s` for Logistic.
```julia
EvoTrees.predict(model, X)
```
Alternatively, models act as a functor, returning predictions when called as a function with features as argument:
```julia
model(X)
```
# MLJ
From MLJ, the type can be imported using:
```julia
EvoTreeMLE = @load EvoTreeMLE pkg=EvoTrees
```
Do `model = EvoTreeMLE()` to construct an instance with default hyper-parameters.
Provide keyword arguments to override hyper-parameter defaults, as in `EvoTreeMLE(loss=...)`.
## Training data
In MLJ or MLJBase, bind an instance `model` to data with
mach = machine(model, X, y)
where
- `X`: any table of input features (eg, a `DataFrame`) whose columns
each have one of the following element scitypes: `Continuous`,
`Count`, or `<:OrderedFactor`; check column scitypes with `schema(X)`
- `y`: is the target, which can be any `AbstractVector` whose element
scitype is `<:Continuous`; check the scitype
with `scitype(y)`
Train the machine using `fit!(mach, rows=...)`.
## Operations
- `predict(mach, Xnew)`: returns a vector of Gaussian or Logistic distributions (according to provided `loss`) given features `Xnew` having the same scitype as `X` above.
Predictions are probabilistic.
Specific metrics can also be predicted using:
- `predict_mean(mach, Xnew)`
- `predict_mode(mach, Xnew)`
- `predict_median(mach, Xnew)`
## Fitted parameters
The fields of `fitted_params(mach)` are:
- `:fitresult`: The `GBTree` object returned by the EvoTrees.jl fitting algorithm.
## Report
The fields of `report(mach)` are:
- `:features`: The names of the features encountered in training.
# Examples
```
# Internal API
using EvoTrees
config = EvoTreeMLE(max_depth=5, nbins=32, nrounds=100)
nobs, nfeats = 1_000, 5
x_train, y_train = randn(nobs, nfeats), rand(nobs)
model = fit_evotree(config; x_train, y_train)
preds = EvoTrees.predict(model, x_train)
```
```
# MLJ Interface
using MLJ
EvoTreeMLE = @load EvoTreeMLE pkg=EvoTrees
model = EvoTreeMLE(max_depth=5, nbins=32, nrounds=100)
X, y = @load_boston
mach = machine(model, X, y) |> fit!
preds = predict(mach, X)
preds = predict_mean(mach, X)
preds = predict_mode(mach, X)
preds = predict_median(mach, X)
```
"""
EvoTreeMLE
# function MLJ.clean!(model::EvoTreeRegressor)
# warning = ""
# if model.nrounds < 1
# warning *= "Need nrounds ≥ 1. Resetting nrounds=1. "
# model.nrounds = 1
# end
# if model.lambda < 0
# warning *= "Need lambda ≥ 0. Resetting lambda=0. "
# model.lambda = 0.0
# end
# if model.gamma < 0
# warning *= "Need gamma ≥ 0. Resetting gamma=0. "
# model.gamma = 0.0
# end
# if model.η <= 0
# warning *= "Need η > 0. Resetting η=0.001. "
# model.η = 0.001
# end
# if model.max_depth < 1
# warning *= "Need max_depth ≥ 0. Resetting max_depth=0. "
# model.max_depth = 1
# end
# if model.min_weight < 0
# warning *= "Need min_weight ≥ 0. Resetting min_weight=0. "
# model.min_weight = 0.0
# end
# if model.rowsample < 0
# warning *= "Need rowsample ≥ 0. Resetting rowsample=0. "
# model.rowsample = 0.0
# end
# if model.rowsample > 1
# warning *= "Need rowsample <= 1. Resetting rowsample=1. "
# model.rowsample = 1.0
# end
# if model.colsample < 0
# warning *= "Need colsample ≥ 0. Resetting colsample=0. "
# model.colsample = 0.0
# end
# if model.colsample > 1
# warning *= "Need colsample <= 1. Resetting colsample=1. "
# model.colsample = 1.0
# end
# if model.nbins > 250
# warning *= "Need nbins <= 250. Resetting nbins=250. "
# model.nbins = 250
# end
# return warning
# end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 4534 | struct CallBack{B,P,Y,C,D}
feval::Function
x_bin::B
p::P
y::Y
w::C
eval::C
feattypes::D
end
function CallBack(
::EvoTypes{L},
m::EvoTree{L,K},
deval,
device::Type{<:Device};
target_name,
w_name=nothing,
offset_name=nothing,
metric) where {L,K}
T = Float32
_w_name = isnothing(w_name) ? Symbol("") : Symbol(w_name)
_offset_name = isnothing(offset_name) ? Symbol("") : Symbol(offset_name)
_target_name = Symbol(target_name)
feval = metric_dict[metric]
x_bin = binarize(deval; fnames=m.info[:fnames], edges=m.info[:edges])
nobs = length(Tables.getcolumn(deval, 1))
p = zeros(T, K, nobs)
y_eval = Tables.getcolumn(deval, _target_name)
if L == MLogLoss
if eltype(y_eval) <: CategoricalValue
levels = CategoricalArrays.levels(y_eval)
μ = zeros(T, K)
y = UInt32.(CategoricalArrays.levelcode.(y_eval))
else
levels = sort(unique(y_eval))
yc = CategoricalVector(y_eval, levels=levels)
μ = zeros(T, K)
y = UInt32.(CategoricalArrays.levelcode.(yc))
end
else
y = T.(y_eval)
end
V = device_array_type(device)
w = isnothing(w_name) ? device_ones(device, T, length(y)) : V{T}(Tables.getcolumn(deval, _w_name))
offset = !isnothing(offset_name) ? T.(Tables.getcolumn(deval, _offset_name)) : nothing
if !isnothing(offset)
L == LogLoss && (offset .= logit.(offset))
L in [Poisson, Gamma, Tweedie] && (offset .= log.(offset))
        L == MLogLoss && (offset .= log.(offset))
L in [GaussianMLE, LogisticMLE] && (offset[:, 2] .= log.(offset[:, 2]))
offset = T.(offset)
p .+= offset'
end
return CallBack(feval, convert(V, x_bin), convert(V, p), convert(V, y), w, similar(w), convert(V, m.info[:feattypes]))
end
function CallBack(
::EvoTypes{L},
m::EvoTree{L,K},
x_eval::AbstractMatrix,
y_eval,
device::Type{<:Device};
w_eval=nothing,
offset_eval=nothing,
metric) where {L,K}
T = Float32
feval = metric_dict[metric]
x_bin = binarize(x_eval; fnames=m.info[:fnames], edges=m.info[:edges])
p = zeros(T, K, size(x_eval, 1))
if L == MLogLoss
if eltype(y_eval) <: CategoricalValue
levels = CategoricalArrays.levels(y_eval)
μ = zeros(T, K)
y = UInt32.(CategoricalArrays.levelcode.(y_eval))
else
levels = sort(unique(y_eval))
yc = CategoricalVector(y_eval, levels=levels)
μ = zeros(T, K)
y = UInt32.(CategoricalArrays.levelcode.(yc))
end
else
y = T.(y_eval)
end
V = device_array_type(device)
w = isnothing(w_eval) ? device_ones(device, T, length(y)) : V{T}(w_eval)
offset = !isnothing(offset_eval) ? T.(offset_eval) : nothing
if !isnothing(offset)
L == LogLoss && (offset .= logit.(offset))
L in [Poisson, Gamma, Tweedie] && (offset .= log.(offset))
L == MLogLoss && (offset .= log.(offset))
L in [GaussianMLE, LogisticMLE] && (offset[:, 2] .= log.(offset[:, 2]))
offset = T.(offset)
p .+= offset'
end
return CallBack(feval, convert(V, x_bin), convert(V, p), convert(V, y), w, similar(w), convert(V, m.info[:feattypes]))
end
function (cb::CallBack)(logger, iter, tree)
predict!(cb.p, tree, cb.x_bin, cb.feattypes)
metric = cb.feval(cb.p, cb.y, cb.w, cb.eval)
update_logger!(logger, iter, metric)
return nothing
end
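# Usage note (added for clarity): the callback is invoked once per boosting round
# inside `fit_evotree`, e.g. `cb(logger, iter, m.trees[end])`: it refreshes the
# cached predictions `cb.p` with the latest tree and pushes the resulting metric
# into `logger`.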
function init_logger(; metric, maximise, early_stopping_rounds)
logger = Dict(
:name => String(metric),
:maximise => maximise,
:early_stopping_rounds => early_stopping_rounds,
:nrounds => 0,
:iter => Int[],
:metrics => Float64[],
:iter_since_best => 0,
:best_iter => 0,
:best_metric => 0.0,
)
return logger
end
function update_logger!(logger, iter, metric)
logger[:nrounds] = iter
push!(logger[:iter], iter)
push!(logger[:metrics], metric)
if iter == 0
logger[:best_metric] = metric
else
if (logger[:maximise] && metric > logger[:best_metric]) ||
(!logger[:maximise] && metric < logger[:best_metric])
logger[:best_metric] = metric
logger[:best_iter] = iter
logger[:iter_since_best] = 0
else
logger[:iter_since_best] += logger[:iter][end] - logger[:iter][end-1]
end
end
end
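# Early-stopping sketch (hypothetical values, minimised metric): after a best
# metric at iter 10, two non-improving logged rounds at iters 11 and 12 leave
# `iter_since_best == 2`; with `early_stopping_rounds = 2`, the
# `iter_since_best >= early_stopping_rounds` check in `fit_evotree` then breaks.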
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 5070 | function mse(
p::AbstractMatrix{T},
y::AbstractVector,
w::AbstractVector,
eval::AbstractVector;
kwargs...
) where {T}
@threads for i in eachindex(y)
eval[i] = w[i] * (p[1, i] - y[i])^2
end
return sum(eval) / sum(w)
end
rmse(p::AbstractMatrix{T}, y::AbstractVector, w::AbstractVector, eval::AbstractVector; kwargs...) where {T} =
    sqrt(mse(p, y, w, eval; kwargs...))
function mae(
p::AbstractMatrix{T},
y::AbstractVector,
w::AbstractVector,
eval::AbstractVector;
kwargs...
) where {T}
@threads for i in eachindex(y)
eval[i] = w[i] * abs(p[1, i] - y[i])
end
return sum(eval) / sum(w)
end
function logloss(
p::AbstractMatrix{T},
y::AbstractVector,
w::AbstractVector,
eval::AbstractVector;
kwargs...
) where {T}
@threads for i in eachindex(y)
pred = sigmoid(p[1, i])
eval[i] = w[i] * (-y[i] * log(pred) + (y[i] - 1) * log(1 - pred))
end
return sum(eval) / sum(w)
end
function mlogloss(
p::AbstractMatrix{T},
y::AbstractVector,
w::AbstractVector,
eval::AbstractVector;
kwargs...
) where {T}
K = size(p, 1)
@threads for i in eachindex(y)
isum = zero(T)
@inbounds for k in 1:K
isum += exp(p[k, i])
end
@inbounds eval[i] = w[i] * (log(isum) - p[y[i], i])
end
return sum(eval) / sum(w)
end
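# Note (added for clarity): `log(isum) - p[y[i], i]` is the log-sum-exp form of
# `-log(softmax(p[:, i])[y[i]])`, i.e. multi-class cross-entropy on raw logits.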
function poisson(
p::AbstractMatrix{T},
y::AbstractVector,
w::AbstractVector,
eval::AbstractVector;
kwargs...
) where {T}
@threads for i in eachindex(y)
pred = exp(p[1, i])
eval[i] = w[i] * 2 * (y[i] * (log(y[i]) - log(pred)) + pred - y[i])
end
return sum(eval) / sum(w)
end
function gamma(
p::AbstractMatrix{T},
y::AbstractVector,
w::AbstractVector,
eval::AbstractVector;
kwargs...
) where {T}
@threads for i in eachindex(y)
pred = exp(p[1, i])
eval[i] = w[i] * 2 * (log(pred / y[i]) + y[i] / pred - 1)
end
return sum(eval) / sum(w)
end
function tweedie(
p::AbstractMatrix{T},
y::AbstractVector,
w::AbstractVector,
eval::AbstractVector;
kwargs...
) where {T}
rho = T(1.5)
@threads for i in eachindex(y)
pred = exp(p[1, i])
eval[i] =
w[i] *
2 *
(
y[i]^(2 - rho) / (1 - rho) / (2 - rho) - y[i] * pred^(1 - rho) / (1 - rho) +
pred^(2 - rho) / (2 - rho)
)
end
return sum(eval) / sum(w)
end
function gaussian_mle(
p::AbstractMatrix{T},
y::AbstractVector,
w::AbstractVector,
eval::AbstractVector;
kwargs...
) where {T}
@threads for i in eachindex(y)
eval[i] = -w[i] * (p[2, i] + (y[i] - p[1, i])^2 / (2 * exp(2 * p[2, i])))
end
return sum(eval) / sum(w)
end
function logistic_mle(
p::AbstractMatrix{T},
y::AbstractVector,
w::AbstractVector,
eval::AbstractVector;
kwargs...
) where {T}
@threads for i in eachindex(y)
eval[i] = w[i] * (log(1 / 4 * sech(exp(-p[2, i]) * (y[i] - p[1, i]))^2) - p[2, i])
end
return sum(eval) / sum(w)
end
function wmae(
p::AbstractMatrix{T},
y::AbstractVector,
w::AbstractVector,
eval::AbstractVector;
alpha=0.5,
kwargs...
) where {T}
@threads for i in eachindex(y)
eval[i] =
w[i] * (
alpha * max(y[i] - p[1, i], zero(T)) +
(1 - alpha) * max(p[1, i] - y[i], zero(T))
)
end
return sum(eval) / sum(w)
end
function gini_raw(p::AbstractVector, y::AbstractVector)
_y = y .- minimum(y)
if length(_y) < 2
return 0.0
end
random = cumsum(ones(length(p)) ./ length(p)^2)
y_sort = _y[sortperm(p)]
y_cum = cumsum(y_sort) ./ sum(_y) ./ length(p)
gini = sum(random .- y_cum)
return gini
end
function gini_norm(p::AbstractVector, y::AbstractVector)
if length(y) < 2
return 0.0
end
    return gini_raw(p, y) / gini_raw(y, y)
end
function gini(
p::AbstractMatrix{T},
y::AbstractVector,
w::AbstractVector,
eval::AbstractVector;
kwargs...
) where {T}
return gini_norm(view(p, 1, :), y)
end
const metric_dict = Dict(
:mse => mse,
:rmse => rmse,
:mae => mae,
:logloss => logloss,
:mlogloss => mlogloss,
:poisson_deviance => poisson,
:poisson => poisson,
:gamma_deviance => gamma,
:gamma => gamma,
:tweedie_deviance => tweedie,
:tweedie => tweedie,
:gaussian_mle => gaussian_mle,
:gaussian => gaussian_mle,
:logistic_mle => logistic_mle,
:wmae => wmae,
:quantile => wmae,
:gini => gini,
)
is_maximise(::typeof(mse)) = false
is_maximise(::typeof(rmse)) = false
is_maximise(::typeof(mae)) = false
is_maximise(::typeof(logloss)) = false
is_maximise(::typeof(mlogloss)) = false
is_maximise(::typeof(poisson)) = false
is_maximise(::typeof(gamma)) = false
is_maximise(::typeof(tweedie)) = false
is_maximise(::typeof(gaussian_mle)) = true
is_maximise(::typeof(logistic_mle)) = true
is_maximise(::typeof(wmae)) = false
is_maximise(::typeof(gini)) = true | EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 9049 | """
    get_edges(X::AbstractMatrix{T}; fnames, nbins, rng=Random.MersenneTwister()) where {T}
    get_edges(df; fnames, nbins, rng=Random.MersenneTwister())
Get the histogram breaking points of the feature data.
"""
function get_edges(X::AbstractMatrix{T}; fnames, nbins, rng=Random.MersenneTwister()) where {T}
@assert T <: Real
nobs = min(size(X, 1), 1000 * nbins)
idx = sample(rng, 1:size(X, 1), nobs, replace=false, ordered=true)
nfeats = size(X, 2)
edges = Vector{Vector{T}}(undef, nfeats)
featbins = Vector{UInt8}(undef, nfeats)
feattypes = Vector{Bool}(undef, nfeats)
@threads for j in 1:size(X, 2)
edges[j] = quantile(view(X, idx, j), (1:nbins-1) / nbins)
if length(edges[j]) == 1
edges[j] = [minimum(view(X, idx, j))]
end
featbins[j] = length(edges[j]) + 1
feattypes[j] = true
end
return edges, featbins, feattypes
end
function get_edges(df; fnames, nbins, rng=Random.MersenneTwister())
_nobs = Tables.DataAPI.nrow(df)
nobs = min(_nobs, 1000 * nbins)
idx = sample(rng, 1:_nobs, nobs, replace=false, ordered=true)
edges = Vector{Any}([Vector{eltype(Tables.getcolumn(df, col))}() for col in fnames])
nfeats = length(fnames)
featbins = Vector{UInt8}(undef, nfeats)
feattypes = Vector{Bool}(undef, nfeats)
@threads for j in eachindex(fnames)
col = view(Tables.getcolumn(df, fnames[j]), idx)
if eltype(col) <: Bool
edges[j] = [false, true]
featbins[j] = 2
feattypes[j] = false
elseif eltype(col) <: CategoricalValue
edges[j] = levels(col)
featbins[j] = length(edges[j])
feattypes[j] = isordered(col) ? true : false
@assert featbins[j] <= 255 "Max categorical levels currently limited to 255, $(fnames[j]) has $(featbins[j])."
elseif eltype(col) <: Real
edges[j] = unique(quantile(col, (1:nbins-1) / nbins))
featbins[j] = length(edges[j]) + 1
feattypes[j] = true
else
@error "Invalid feature eltype: $(fnames[j]) is $(eltype(col))"
end
if length(edges[j]) == 1
edges[j] = [minimum(col)]
end
end
return edges, featbins, feattypes
end
"""
binarize(X::AbstractMatrix; fnames, edges)
binarize(df; fnames, edges)
Transform feature data into a UInt8 binarized matrix.
"""
function binarize(X::AbstractMatrix; fnames, edges)
x_bin = zeros(UInt8, size(X))
@threads for j in axes(X, 2)
x_bin[:, j] .= searchsortedfirst.(Ref(edges[j]), view(X, :, j))
end
return x_bin
end
function binarize(df; fnames, edges)
nobs = length(Tables.getcolumn(df, 1))
x_bin = zeros(UInt8, nobs, length(fnames))
@threads for j in eachindex(fnames)
col = Tables.getcolumn(df, fnames[j])
if eltype(col) <: Bool
x_bin[:, j] .= col .+ 1
elseif eltype(col) <: CategoricalValue
x_bin[:, j] .= levelcode.(col)
elseif eltype(col) <: Real
x_bin[:, j] .= searchsortedfirst.(Ref(edges[j]), col)
else
@error "Invalid feature eltype: $(fnames[j]) is $(eltype(col))"
end
end
return x_bin
end
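# Usage sketch (synthetic data; illustrative only):
#   X = rand(100, 3)
#   fnames = [:a, :b, :c]
#   edges, featbins, feattypes = get_edges(X; fnames, nbins=32)
#   x_bin = binarize(X; fnames, edges)   # 100×3 Matrix{UInt8} of bin indices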
"""
Multi-threaded split_set!
Take a view into left and right placeholders. Right ids are assigned at the end of the length of the current node set.
"""
function split_set_chunk!(
left,
right,
is,
bid,
nblocks,
x_bin,
feat,
cond_bin,
feattype,
offset,
chunk_size,
)
left_count = 0
right_count = 0
i = chunk_size * (bid - 1) + 1
bid == nblocks ? bsize = length(is) - chunk_size * (bid - 1) : bsize = chunk_size
i_max = i + bsize - 1
@inbounds while i <= i_max
cond = feattype ? x_bin[is[i], feat] <= cond_bin : x_bin[is[i], feat] == cond_bin
if cond
left_count += 1
left[offset+chunk_size*(bid-1)+left_count] = is[i]
else
right_count += 1
right[offset+chunk_size*(bid-1)+right_count] = is[i]
end
i += 1
end
return left_count, right_count
end
function split_views_kernel!(
out::Vector{S},
left::Vector{S},
right::Vector{S},
bid,
offset,
chunk_size,
lefts,
rights,
sum_lefts,
cumsum_lefts,
cumsum_rights,
) where {S}
iter = 1
i_max = lefts[bid]
bid == 1 ? cumsum_left = 0 : cumsum_left = cumsum_lefts[bid-1]
@inbounds while iter <= i_max
out[offset+cumsum_left+iter] = left[offset+chunk_size*(bid-1)+iter]
iter += 1
end
iter = 1
i_max = rights[bid]
bid == 1 ? cumsum_right = 0 : cumsum_right = cumsum_rights[bid-1]
@inbounds while iter <= i_max
out[offset+sum_lefts+cumsum_right+iter] = right[offset+chunk_size*(bid-1)+iter]
iter += 1
end
return nothing
end
function split_set_threads!(
out,
left,
right,
is,
x_bin::Matrix{S},
feat,
cond_bin,
feattype,
offset,
) where {S}
chunk_size = cld(length(is), min(cld(length(is), 16_000), Threads.nthreads()))
nblocks = cld(length(is), chunk_size)
lefts = zeros(Int, nblocks)
rights = zeros(Int, nblocks)
@threads for bid = 1:nblocks
lefts[bid], rights[bid] = split_set_chunk!(
left,
right,
is,
bid,
nblocks,
x_bin,
feat,
cond_bin,
feattype,
offset,
chunk_size,
)
end
sum_lefts = sum(lefts)
cumsum_lefts = cumsum(lefts)
cumsum_rights = cumsum(rights)
@threads for bid = 1:nblocks
split_views_kernel!(
out,
left,
right,
bid,
offset,
chunk_size,
lefts,
rights,
sum_lefts,
cumsum_lefts,
cumsum_rights,
)
end
return (
view(out, offset+1:offset+sum_lefts),
view(out, offset+sum_lefts+1:offset+length(is)),
)
end
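# Semantics note (added for clarity): given observation indices `is` and a split
# (feat, cond_bin), `split_set_threads!` partitions `is` into (left, right) views
# over `out`. Numeric features send `x_bin[i, feat] <= cond_bin` to the left;
# categorical features send `x_bin[i, feat] == cond_bin` to the left. `offset`
# marks where this node's block starts inside the depth-wide `out` buffer.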
"""
update_hist!
GradientRegression
"""
function update_hist!(
::Type{L},
hist::Vector{Matrix{Float64}},
∇::Matrix{Float32},
x_bin::Matrix,
is::AbstractVector,
js::AbstractVector,
) where {L<:GradientRegression}
@threads for j in js
@inbounds @simd for i in is
bin = x_bin[i, j]
hist[j][1, bin] += ∇[1, i]
hist[j][2, bin] += ∇[2, i]
hist[j][3, bin] += ∇[3, i]
end
end
return nothing
end
"""
update_hist!
MLE2P
"""
function update_hist!(
::Type{L},
hist::Vector{Matrix{Float64}},
∇::Matrix{Float32},
x_bin::Matrix,
is::AbstractVector,
js::AbstractVector,
) where {L<:MLE2P}
@threads for j in js
@inbounds @simd for i in is
bin = x_bin[i, j]
hist[j][1, bin] += ∇[1, i]
hist[j][2, bin] += ∇[2, i]
hist[j][3, bin] += ∇[3, i]
hist[j][4, bin] += ∇[4, i]
hist[j][5, bin] += ∇[5, i]
end
end
return nothing
end
"""
update_hist!
Generic fallback - Softmax
"""
function update_hist!(
::Type{L},
hist::Vector{Matrix{Float64}},
∇::Matrix{Float32},
x_bin::Matrix,
is::AbstractVector,
js::AbstractVector,
) where {L}
@threads for j in js
@inbounds for i in is
bin = x_bin[i, j]
@inbounds @simd for k in axes(∇, 1)
hist[j][k, bin] += ∇[k, i]
end
end
end
return nothing
end
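# Layout note (added for clarity): `hist[j]` is a `(2K + 1) × nbins` matrix for
# feature `j`, mirroring the row layout of `∇`: rows `1:K` hold first-order
# gradients, rows `K+1:2K` second-order terms, and the last row the observation
# weights.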
"""
update_gains!(
loss::L,
node::TrainNode{T},
js::Vector,
params::EvoTypes, K, monotone_constraints) where {L,T,S}
Generic fallback
"""
function update_gains!(
node::TrainNode,
js,
params::EvoTypes{L},
feattypes::Vector{Bool},
monotone_constraints,
) where {L}
h = node.h
hL = node.hL
hR = node.hR
gains = node.gains
∑ = node.∑
@inbounds for j in js
if feattypes[j]
cumsum!(hL[j], h[j], dims=2)
hR[j] .= ∑ .- hL[j]
else
hR[j] .= ∑ .- h[j]
hL[j] .= h[j]
end
monotone_constraint = monotone_constraints[j]
@inbounds for bin in eachindex(gains[j])
if hL[j][end, bin] > params.min_weight && hR[j][end, bin] > params.min_weight
if monotone_constraint != 0
predL = pred_scalar(view(hL[j], :, bin), params)
predR = pred_scalar(view(hR[j], :, bin), params)
end
if (monotone_constraint == 0) ||
(monotone_constraint == -1 && predL > predR) ||
(monotone_constraint == 1 && predL < predR)
gains[j][bin] =
get_gain(params, view(hL[j], :, bin)) +
get_gain(params, view(hR[j], :, bin))
end
end
end
end
return nothing
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 20348 | """
grow_evotree!(evotree::EvoTree{L,K}, cache, params::EvoTypes{L}, ::Type{<:Device}=CPU) where {L,K}
Given an instantiated model `m` and its training `cache`, computes the gradients, samples rows and columns, grows one additional tree, appends it to `m.trees`, and updates the cached predictions.
"""
function grow_evotree!(m::EvoTree{L,K}, cache, params::EvoTypes{L}, ::Type{<:Device}=CPU) where {L,K}
# compute gradients
update_grads!(cache.∇, cache.pred, cache.y, params)
# subsample rows
cache.nodes[1].is = subsample(cache.is_in, cache.is_out, cache.mask, params.rowsample, params.rng)
# subsample cols
sample!(params.rng, cache.js_, cache.js, replace=false, ordered=true)
# instantiate a tree then grow it
tree = Tree{L,K}(params.max_depth)
grow! = params.tree_type == "oblivious" ? grow_otree! : grow_tree!
grow!(
tree,
cache.nodes,
params,
cache.∇,
cache.edges,
cache.js,
cache.out,
cache.left,
cache.right,
cache.x_bin,
cache.feattypes,
cache.monotone_constraints
)
push!(m.trees, tree)
predict!(cache.pred, tree, cache.x_bin, cache.feattypes)
cache[:info][:nrounds] += 1
return nothing
end
# grow a single tree
function grow_tree!(
tree::Tree{L,K},
nodes::Vector{N},
params::EvoTypes{L},
∇::Matrix,
edges,
js,
out,
left,
right,
x_bin,
feattypes::Vector{Bool},
monotone_constraints
) where {L,K,N}
# reset nodes
for n in nodes
n.∑ .= 0
n.gain = 0.0
@inbounds for i in eachindex(n.h)
n.h[i] .= 0
n.gains[i] .= 0
end
end
# initialize
n_current = [1]
depth = 1
# initialize summary stats
nodes[1].∑ .= dropdims(sum(Float64, view(∇, :, nodes[1].is), dims=2), dims=2)
nodes[1].gain = get_gain(params, nodes[1].∑)
# grow while there are remaining active nodes
while length(n_current) > 0 && depth <= params.max_depth
offset = 0 # identifies breakpoint for each node set within a depth
n_next = Int[]
if depth < params.max_depth
for n_id in eachindex(n_current)
n = n_current[n_id]
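                # histogram subtraction trick (added note): the second node of a
                # sibling pair reuses `parent hist - sibling hist` instead of
                # re-accumulating over its observations.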
if n_id % 2 == 0
if n % 2 == 0
@inbounds for j in js
nodes[n].h[j] .= nodes[n>>1].h[j] .- nodes[n+1].h[j]
end
else
@inbounds for j in js
nodes[n].h[j] .= nodes[n>>1].h[j] .- nodes[n-1].h[j]
end
end
else
update_hist!(L, nodes[n].h, ∇, x_bin, nodes[n].is, js)
end
end
@threads for n ∈ sort(n_current)
update_gains!(nodes[n], js, params, feattypes, monotone_constraints)
end
end
for n ∈ sort(n_current)
if depth == params.max_depth || nodes[n].∑[end] <= params.min_weight
pred_leaf_cpu!(tree.pred, n, nodes[n].∑, params, ∇, nodes[n].is)
else
best = findmax(findmax.(nodes[n].gains))
best_gain = best[1][1]
best_bin = best[1][2]
best_feat = best[2]
if best_gain > nodes[n].gain + params.gamma
tree.gain[n] = best_gain - nodes[n].gain
tree.cond_bin[n] = best_bin
tree.feat[n] = best_feat
tree.cond_float[n] = edges[tree.feat[n]][tree.cond_bin[n]]
tree.split[n] = best_bin != 0
_left, _right = split_set_threads!(
out,
left,
right,
nodes[n].is,
x_bin,
tree.feat[n],
tree.cond_bin[n],
feattypes[best_feat],
offset,
)
offset += length(nodes[n].is)
nodes[n<<1].is, nodes[n<<1+1].is = _left, _right
nodes[n<<1].∑ .= nodes[n].hL[best_feat][:, best_bin]
nodes[n<<1+1].∑ .= nodes[n].hR[best_feat][:, best_bin]
nodes[n<<1].gain = get_gain(params, nodes[n<<1].∑)
nodes[n<<1+1].gain = get_gain(params, nodes[n<<1+1].∑)
if length(_right) >= length(_left)
push!(n_next, n << 1)
push!(n_next, n << 1 + 1)
else
push!(n_next, n << 1 + 1)
push!(n_next, n << 1)
end
else
pred_leaf_cpu!(tree.pred, n, nodes[n].∑, params, ∇, nodes[n].is)
end
end
end
n_current = copy(n_next)
depth += 1
end # end of loop over active ids for a given depth
return nothing
end
# grow a single oblivious tree
function grow_otree!(
tree::Tree{L,K},
nodes::Vector{N},
params::EvoTypes{L},
∇::Matrix,
edges,
js,
out,
left,
right,
x_bin,
feattypes::Vector{Bool},
monotone_constraints
) where {L,K,N}
# reset nodes
for n in nodes
n.∑ .= 0
n.gain = 0.0
@inbounds for i in eachindex(n.h)
n.h[i] .= 0
n.gains[i] .= 0
end
end
# initialize
n_current = [1]
depth = 1
# initialize summary stats
nodes[1].∑ .= dropdims(sum(Float64, view(∇, :, nodes[1].is), dims=2), dims=2)
nodes[1].gain = get_gain(params, nodes[1].∑)
# grow while there are remaining active nodes
while length(n_current) > 0 && depth <= params.max_depth
offset = 0 # identifies breakpoint for each node set within a depth
n_next = Int[]
min_weight_flag = false
for n in n_current
nodes[n].∑[end] <= params.min_weight ? min_weight_flag = true : nothing
end
if depth == params.max_depth || min_weight_flag
for n in n_current
# @info "length(nodes[n].is)" length(nodes[n].is) depth n
pred_leaf_cpu!(tree.pred, n, nodes[n].∑, params, ∇, nodes[n].is)
end
else
# update histograms
for n_id in eachindex(n_current)
n = n_current[n_id]
if n_id % 2 == 0
if n % 2 == 0
@inbounds for j in js
nodes[n].h[j] .= nodes[n>>1].h[j] .- nodes[n+1].h[j]
end
else
@inbounds for j in js
nodes[n].h[j] .= nodes[n>>1].h[j] .- nodes[n-1].h[j]
end
end
else
update_hist!(L, nodes[n].h, ∇, x_bin, nodes[n].is, js)
end
end
@threads for n ∈ n_current
update_gains!(nodes[n], js, params, feattypes, monotone_constraints)
end
# initialize gains for node 1 in which all gains of a given depth will be accumulated
if depth > 1
@inbounds for j in js
nodes[1].gains[j] .= 0
end
end
gain = 0
# update gains based on the aggregation of all nodes of a given depth. One gains matrix per depth (vs one per node in binary trees).
for n ∈ sort(n_current)
if n > 1 # accumulate gains in node 1
for j in js
nodes[1].gains[j] .+= nodes[n].gains[j]
end
end
gain += nodes[n].gain
end
for n ∈ sort(n_current)
if n > 1
for j in js
                        nodes[1].gains[j] .*= nodes[n].gains[j] .> 0 # mask: ignore gains when any node at this depth is ineligible (per-leaf weight too small)
end
end
end
# find best split
best = findmax(findmax.(nodes[1].gains))
best_gain = best[1][1]
best_bin = best[1][2]
best_feat = best[2]
if best_gain > gain + params.gamma
for n in sort(n_current)
tree.gain[n] = best_gain - nodes[n].gain
tree.cond_bin[n] = best_bin
tree.feat[n] = best_feat
tree.cond_float[n] = edges[best_feat][best_bin]
tree.split[n] = best_bin != 0
_left, _right = split_set_threads!(
out,
left,
right,
nodes[n].is,
x_bin,
tree.feat[n],
tree.cond_bin[n],
feattypes[best_feat],
offset,
)
offset += length(nodes[n].is)
nodes[n<<1].is, nodes[n<<1+1].is = _left, _right
nodes[n<<1].∑ .= nodes[n].hL[best_feat][:, best_bin]
nodes[n<<1+1].∑ .= nodes[n].hR[best_feat][:, best_bin]
nodes[n<<1].gain = get_gain(params, nodes[n<<1].∑)
nodes[n<<1+1].gain = get_gain(params, nodes[n<<1+1].∑)
if length(_right) >= length(_left)
push!(n_next, n << 1)
push!(n_next, n << 1 + 1)
else
push!(n_next, n << 1 + 1)
push!(n_next, n << 1)
end
end
else
for n in n_current
pred_leaf_cpu!(tree.pred, n, nodes[n].∑, params, ∇, nodes[n].is)
end
end
end
n_current = copy(n_next)
depth += 1
end # end of loop over current nodes for a given depth
return nothing
end
"""
fit_evotree(
params::EvoTypes{L},
dtrain;
target_name,
fnames=nothing,
w_name=nothing,
offset_name=nothing,
deval=nothing,
metric=nothing,
early_stopping_rounds=9999,
print_every_n=9999,
verbosity=1,
return_logger=false,
device="cpu")
Main training function. Performs model fitting given configuration `params`, `dtrain`, `target_name` and other optional kwargs.
# Arguments
- `params::EvoTypes`: configuration info providing hyper-parameters. `EvoTypes` can be one of:
- [`EvoTreeRegressor`](@ref)
- [`EvoTreeClassifier`](@ref)
- [`EvoTreeCount`](@ref)
- [`EvoTreeMLE`](@ref)
- `dtrain`: Tables-compatible training data (named tuples, DataFrames...) containing the features and target variable.
# Keyword arguments
- `target_name`: name of target variable.
- `fnames = nothing`: feature names. If `nothing`, all columns of `dtrain` with a `Real` or `CategoricalValue` element type, other than the target, weight and offset columns, are used. If provided, should be a vector of `Symbol`s or strings.
- `w_name = nothing`: name of the variable containing weights. If `nothing`, uniform weights of one will be used.
- `offset_name = nothing`: name of the offset variable.
- `deval`: Tables-compatible evaluation data containing the features and target variable.
- `metric`: The evaluation metric that will be tracked on `deval`.
Supported metrics are:
- `:mse`: mean-squared error. Adapted for general regression models.
- `:rmse`: root-mean-squared error (CPU only). Adapted for general regression models.
- `:mae`: mean absolute error. Adapted for general regression models.
- `:logloss`: Adapted for `:logistic` regression models.
- `:mlogloss`: Multi-class cross entropy. Adapted to `EvoTreeClassifier` classification models.
- `:poisson`: Poisson deviance. Adapted to `EvoTreeCount` count models.
- `:gamma`: Gamma deviance. Adapted to regression problem on Gamma like, positively distributed targets.
- `:tweedie`: Tweedie deviance. Adapted to regression problem on Tweedie like, positively distributed targets with probability mass at `y == 0`.
- `:gaussian_mle`: Gaussian maximum log-likelihood. Adapted to `EvoTreeMLE` models with `loss = :gaussian_mle`.
- `:logistic_mle`: Logistic maximum log-likelihood. Adapted to `EvoTreeMLE` models with `loss = :logistic_mle`.
- `early_stopping_rounds::Integer`: number of consecutive rounds without metric improvement after which fitting is stopped.
- `print_every_n`: sets at which frequency logging info should be printed.
- `verbosity`: set to 1 to print logging info during training.
- `return_logger::Bool = false`: if set to `true`, `fit_evotree` returns a tuple `(m, logger)` where `logger` is a dict containing various tracking information.
- `device="cpu"`: Hardware device to use for computations. Can be either `"cpu"` or `"gpu"`. The following losses are not GPU-supported at the moment:
  `:l1`, `:quantile`, `:logistic_mle`.
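# Examples
A minimal sketch on synthetic, Tables-compatible data (all names and values are illustrative):
```julia
using EvoTrees
config = EvoTreeRegressor(max_depth=4, nrounds=50)
dtrain = (x1=randn(1_000), x2=randn(1_000), y=randn(1_000))
m = fit_evotree(config, dtrain; target_name=:y)
preds = m(dtrain)
```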
"""
function fit_evotree(
params::EvoTypes{L},
dtrain;
target_name,
fnames=nothing,
w_name=nothing,
offset_name=nothing,
deval=nothing,
metric=nothing,
early_stopping_rounds=9999,
print_every_n=9999,
verbosity=1,
return_logger=false,
device="cpu"
) where {L}
@assert Tables.istable(dtrain) "fit_evotree(params, dtrain) only accepts Tables compatible input for `dtrain` (ex: named tuples, DataFrames...)"
dtrain = Tables.columntable(dtrain)
verbosity == 1 && @info params
@assert string(device) ∈ ["cpu", "gpu"]
_device = string(device) == "cpu" ? CPU : GPU
m, cache = init(params, dtrain, _device; target_name, fnames, w_name, offset_name)
# initialize callback and logger if tracking eval data
metric = isnothing(metric) ? nothing : Symbol(metric)
logging_flag = !isnothing(metric) && !isnothing(deval)
any_flag = !isnothing(metric) || !isnothing(deval)
if !logging_flag && any_flag
@warn "To track eval metric in logger, both `metric` and `deval` must be provided."
end
if logging_flag
deval = Tables.columntable(deval)
cb = CallBack(params, m, deval, _device; target_name, w_name, offset_name, metric)
logger = init_logger(; metric, maximise=is_maximise(cb.feval), early_stopping_rounds)
cb(logger, 0, m.trees[end])
(verbosity > 0) && @info "initialization" metric = logger[:metrics][end]
else
logger, cb = nothing, nothing
end
for i = 1:params.nrounds
grow_evotree!(m, cache, params, _device)
if !isnothing(logger)
cb(logger, i, m.trees[end])
if i % print_every_n == 0 && verbosity > 0
@info "iter $i" metric = logger[:metrics][end]
end
(logger[:iter_since_best] >= logger[:early_stopping_rounds]) && break
end
end
post_fit_gc(_device)
if return_logger
return (m, logger)
else
return m
end
end
# A no-op on the CPU, but on the GPU we perform garbage collection
post_fit_gc(::Type{<:CPU}) = nothing
"""
    fit_evotree(
        params::EvoTypes{L};
        x_train::AbstractMatrix,
        y_train::AbstractVector,
        w_train=nothing,
        offset_train=nothing,
        x_eval=nothing,
        y_eval=nothing,
        w_eval=nothing,
        offset_eval=nothing,
        metric=nothing,
        early_stopping_rounds=9999,
        print_every_n=9999,
        verbosity=1,
        fnames=nothing,
        return_logger=false,
        device="cpu")
Main training function. Performs model fitting given configuration `params`, `x_train`, `y_train` and other optional kwargs.
# Arguments
- `params::EvoTypes`: configuration info providing hyper-parameters. `EvoTypes` can be one of:
- [`EvoTreeRegressor`](@ref)
- [`EvoTreeClassifier`](@ref)
- [`EvoTreeCount`](@ref)
- [`EvoTreeMLE`](@ref)
# Keyword arguments
- `x_train::Matrix`: training data of size `[#observations, #features]`.
- `y_train::Vector`: vector of train targets of length `#observations`.
- `w_train::Vector`: vector of train weights of length `#observations`. If `nothing`, a vector of ones is assumed.
- `offset_train::VecOrMat`: offset for the training data. Should match the size of the predictions.
- `x_eval::Matrix`: evaluation data of size `[#observations, #features]`.
- `y_eval::Vector`: vector of evaluation targets of length `#observations`.
- `w_eval::Vector`: vector of evaluation weights of length `#observations`. Defaults to `nothing` (assumes a vector of 1s).
- `offset_eval::VecOrMat`: evaluation data offset. Should match the size of the predictions.
- `metric`: The evaluation metric that will be tracked on `x_eval`, `y_eval` and optionally `w_eval` / `offset_eval` data.
Supported metrics are:
- `:mse`: mean-squared error. Adapted for general regression models.
- `:rmse`: root-mean-squared error (CPU only). Adapted for general regression models.
- `:mae`: mean absolute error. Adapted for general regression models.
- `:logloss`: Adapted for `:logistic` regression models.
- `:mlogloss`: Multi-class cross entropy. Adapted to `EvoTreeClassifier` classification models.
- `:poisson`: Poisson deviance. Adapted to `EvoTreeCount` count models.
- `:gamma`: Gamma deviance. Adapted to regression problem on Gamma like, positively distributed targets.
- `:tweedie`: Tweedie deviance. Adapted to regression problem on Tweedie like, positively distributed targets with probability mass at `y == 0`.
- `:gaussian_mle`: Gaussian maximum log-likelihood. Adapted to `EvoTreeMLE` models with `loss = :gaussian_mle`.
- `:logistic_mle`: Logistic maximum log-likelihood. Adapted to `EvoTreeMLE` models with `loss = :logistic_mle`.
- `early_stopping_rounds::Integer`: number of consecutive rounds without metric improvement after which fitting is stopped.
- `print_every_n`: sets at which frequency logging info should be printed.
- `verbosity`: set to 1 to print logging info during training.
- `fnames`: the names of the `x_train` features. If provided, should be a vector of strings with `length(fnames) = size(x_train, 2)`.
- `return_logger::Bool = false`: if set to `true`, `fit_evotree` returns a tuple `(m, logger)` where `logger` is a dict containing various tracking information.
- `device="cpu"`: Hardware device to use for computations. Can be either `"cpu"` or `"gpu"`. The following losses are not GPU-supported at the moment:
  `:l1`, `:quantile`, `:logistic_mle`.
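# Examples
A minimal sketch on synthetic arrays (illustrative only):
```julia
using EvoTrees
config = EvoTreeRegressor(max_depth=4, nrounds=50)
x_train, y_train = randn(1_000, 5), randn(1_000)
m = fit_evotree(config; x_train, y_train)
preds = m(x_train)
```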
"""
function fit_evotree(
params::EvoTypes{L};
x_train::AbstractMatrix,
y_train::AbstractVector,
w_train=nothing,
offset_train=nothing,
x_eval=nothing,
y_eval=nothing,
w_eval=nothing,
offset_eval=nothing,
metric=nothing,
early_stopping_rounds=9999,
print_every_n=9999,
verbosity=1,
fnames=nothing,
return_logger=false,
device="cpu"
) where {L}
verbosity == 1 && @info params
@assert string(device) ∈ ["cpu", "gpu"]
_device = string(device) == "cpu" ? CPU : GPU
m, cache = init(params, x_train, y_train, _device; fnames, w_train, offset_train)
# initialize callback and logger if tracking eval data
metric = isnothing(metric) ? nothing : Symbol(metric)
logging_flag = !isnothing(metric) && !isnothing(x_eval) && !isnothing(y_eval)
any_flag = !isnothing(metric) || !isnothing(x_eval) || !isnothing(y_eval)
if !logging_flag && any_flag
@warn "To track eval metric in logger, `metric`, `x_eval` and `y_eval` must all be provided."
end
if logging_flag
cb = CallBack(params, m, x_eval, y_eval, _device; w_eval, offset_eval, metric)
logger = init_logger(; metric, maximise=is_maximise(cb.feval), early_stopping_rounds)
cb(logger, 0, m.trees[end])
(verbosity > 0) && @info "initialization" metric = logger[:metrics][end]
else
logger, cb = nothing, nothing
end
for i = 1:params.nrounds
grow_evotree!(m, cache, params, _device)
if !isnothing(logger)
cb(logger, i, m.trees[end])
if i % print_every_n == 0 && verbosity > 0
@info "iter $i" metric = logger[:metrics][end]
end
(logger[:iter_since_best] >= logger[:early_stopping_rounds]) && break
end
end
post_fit_gc(_device)
if return_logger
return (m, logger)
else
return m
end
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 807 | function importance!(gain::AbstractVector, tree::Tree)
@inbounds for n in eachindex(tree.split)
if tree.split[n]
gain[tree.feat[n]] += tree.gain[n]
end
end
end
"""
importance(model::EvoTree; fnames=model.info[:fnames])
Sorted normalized feature importance based on loss function gain.
Feature names associated with the model are stored in `model.info[:fnames]` as a string `Vector` and can be updated at any time. Eg: `model.info[:fnames] = new_fnames_vec`.
"""
function importance(model::EvoTree; fnames=model.info[:fnames])
gain = zeros(length(fnames))
for tree in model.trees
importance!(gain, tree)
end
gain .= gain ./ sum(gain)
pairs = collect(Dict(zip(string.(fnames), gain)))
sort!(pairs, by=x -> -x[2])
return pairs
end
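# Usage sketch (illustrative):
#   m = fit_evotree(config; x_train, y_train)
#   importance(m)   # sorted Vector{Pair{String,Float64}}; gains sum to 1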
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 6834 | function init_core(params::EvoTypes{L}, ::Type{CPU}, data, fnames, y_train, w, offset) where {L}
# binarize data into quantiles
edges, featbins, feattypes = get_edges(data; fnames, nbins=params.nbins, rng=params.rng)
x_bin = binarize(data; fnames, edges)
nobs, nfeats = size(x_bin)
T = Float32
target_levels = nothing
target_isordered = false
if L == Logistic
@assert eltype(y_train) <: Real && minimum(y_train) >= 0 && maximum(y_train) <= 1
K = 1
y = T.(y_train)
μ = [logit(mean(y))]
!isnothing(offset) && (offset .= logit.(offset))
elseif L in [Poisson, Gamma, Tweedie]
@assert eltype(y_train) <: Real
K = 1
y = T.(y_train)
μ = fill(log(mean(y)), 1)
!isnothing(offset) && (offset .= log.(offset))
elseif L == MLogLoss
if eltype(y_train) <: CategoricalValue
target_levels = CategoricalArrays.levels(y_train)
target_isordered = isordered(y_train)
y = UInt32.(CategoricalArrays.levelcode.(y_train))
elseif eltype(y_train) <: Integer || eltype(y_train) <: Bool || eltype(y_train) <: String || eltype(y_train) <: Char
target_levels = sort(unique(y_train))
yc = CategoricalVector(y_train, levels=target_levels)
y = UInt32.(CategoricalArrays.levelcode.(yc))
else
@error "Invalid target eltype: $(eltype(y_train))"
end
K = length(target_levels)
μ = T.(log.(proportions(y, UInt32(1):UInt32(K))))
μ .-= maximum(μ)
!isnothing(offset) && (offset .= log.(offset))
elseif L == GaussianMLE
@assert eltype(y_train) <: Real
K = 2
y = T.(y_train)
μ = [mean(y), log(std(y))]
!isnothing(offset) && (offset[:, 2] .= log.(offset[:, 2]))
elseif L == LogisticMLE
@assert eltype(y_train) <: Real
K = 2
y = T.(y_train)
μ = [mean(y), log(std(y) * sqrt(3) / π)]
!isnothing(offset) && (offset[:, 2] .= log.(offset[:, 2]))
else
@assert eltype(y_train) <: Real
K = 1
y = T.(y_train)
μ = [mean(y)]
end
μ = T.(μ)
# force a neutral/zero bias/initial tree when offset is specified
!isnothing(offset) && (μ .= 0)
@assert (length(y) == length(w) && minimum(w) > 0)
# initialize preds
pred = zeros(T, K, nobs)
pred .= μ
!isnothing(offset) && (pred .+= offset')
# initialize gradients
∇ = zeros(T, 2 * K + 1, nobs)
∇[end, :] .= w
# initialize indexes
is_in = zeros(UInt32, nobs)
is_out = zeros(UInt32, nobs)
mask = zeros(UInt8, nobs)
js_ = UInt32.(collect(1:nfeats))
js = zeros(UInt32, ceil(Int, params.colsample * nfeats))
out = zeros(UInt32, nobs)
left = zeros(UInt32, nobs)
right = zeros(UInt32, nobs)
    # assign monotone constraints in constraints vector
monotone_constraints = zeros(Int32, nfeats)
hasproperty(params, :monotone_constraints) && for (k, v) in params.monotone_constraints
monotone_constraints[k] = v
end
# model info
info = Dict(
:fnames => fnames,
:target_levels => target_levels,
:target_isordered => target_isordered,
:edges => edges,
:featbins => featbins,
:feattypes => feattypes,
)
# initialize model
nodes = [TrainNode(featbins, K, view(is_in, 1:0)) for n = 1:2^params.max_depth-1]
bias = [Tree{L,K}(μ)]
m = EvoTree{L,K}(bias, info)
# build cache
cache = (
info=Dict(:nrounds => 0),
x_bin=x_bin,
y=y,
w=w,
pred=pred,
K=K,
nodes=nodes,
is_in=is_in,
is_out=is_out,
mask=mask,
js_=js_,
js=js,
out=out,
left=left,
right=right,
∇=∇,
edges=edges,
fnames=fnames,
featbins=featbins,
feattypes=feattypes,
monotone_constraints=monotone_constraints,
)
return m, cache
end
"""
init(
params::EvoTypes,
dtrain,
device::Type{<:Device}=CPU;
target_name,
fnames=nothing,
w_name=nothing,
offset_name=nothing
)
Initialise EvoTree
"""
function init(
params::EvoTypes,
dtrain,
device::Type{<:Device}=CPU;
target_name,
fnames=nothing,
w_name=nothing,
offset_name=nothing
)
# set fnames
schema = Tables.schema(dtrain)
_w_name = isnothing(w_name) ? Symbol("") : Symbol(w_name)
_offset_name = isnothing(offset_name) ? Symbol("") : Symbol(offset_name)
_target_name = Symbol(target_name)
if isnothing(fnames)
fnames = Symbol[]
for i in eachindex(schema.names)
if schema.types[i] <: Union{Real,CategoricalValue}
push!(fnames, schema.names[i])
end
end
fnames = setdiff(fnames, union([_target_name], [_w_name], [_offset_name]))
else
isa(fnames, String) ? fnames = [fnames] : nothing
fnames = Symbol.(fnames)
@assert isa(fnames, Vector{Symbol})
@assert all(fnames .∈ Ref(schema.names))
for name in fnames
@assert schema.types[findfirst(name .== schema.names)] <: Union{Real,CategoricalValue}
end
end
T = Float32
nobs = length(Tables.getcolumn(dtrain, 1))
y_train = Tables.getcolumn(dtrain, _target_name)
V = device_array_type(device)
w = isnothing(w_name) ? device_ones(device, T, nobs) : V{T}(Tables.getcolumn(dtrain, _w_name))
offset = isnothing(offset_name) ? nothing : V{T}(Tables.getcolumn(dtrain, _offset_name))
m, cache = init_core(params, device, dtrain, fnames, y_train, w, offset)
return m, cache
end
# This should be different on CPUs and GPUs
device_ones(::Type{<:CPU}, ::Type{T}, n::Int) where {T} = ones(T, n)
device_array_type(::Type{<:CPU}) = Array
"""
init(
params::EvoTypes,
x_train::AbstractMatrix,
y_train::AbstractVector,
device::Type{<:Device}=CPU;
fnames=nothing,
w_train=nothing,
offset_train=nothing
)
Initialise EvoTree
"""
function init(
params::EvoTypes,
x_train::AbstractMatrix,
y_train::AbstractVector,
device::Type{<:Device}=CPU;
fnames=nothing,
w_train=nothing,
offset_train=nothing
)
# initialize model and cache
fnames = isnothing(fnames) ? [Symbol("feat_$i") for i in axes(x_train, 2)] : Symbol.(fnames)
@assert length(fnames) == size(x_train, 2)
T = Float32
nobs = size(x_train, 1)
V = device_array_type(device)
w = isnothing(w_train) ? device_ones(device, T, nobs) : V{T}(w_train)
offset = isnothing(offset_train) ? nothing : V{T}(offset_train)
m, cache = init_core(params, device, x_train, fnames, y_train, w, offset)
return m, cache
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 5912 | # MSE
function update_grads!(∇::Matrix, p::Matrix, y::Vector, ::EvoTreeRegressor{L}) where {L<:MSE}
@threads for i in eachindex(y)
@inbounds ∇[1, i] = 2 * (p[1, i] - y[i]) * ∇[3, i]
@inbounds ∇[2, i] = 2 * ∇[3, i]
end
end
# LogLoss - on linear predictor
function update_grads!(∇::Matrix, p::Matrix, y::Vector, ::EvoTreeRegressor{L}) where {L<:LogLoss}
@threads for i in eachindex(y)
@inbounds pred = sigmoid(p[1, i])
@inbounds ∇[1, i] = (pred - y[i]) * ∇[3, i]
@inbounds ∇[2, i] = pred * (1 - pred) * ∇[3, i]
end
end
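# Derivation note (added for clarity): with z the linear predictor and
# pred = sigmoid(z), ∂logloss/∂z = pred - y and ∂²logloss/∂z² = pred * (1 - pred);
# both are scaled by the observation weight stored in ∇[3, :].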
# Poisson
function update_grads!(∇::Matrix, p::Matrix, y::Vector, ::EvoTreeCount{L}) where {L<:Poisson}
@threads for i in eachindex(y)
@inbounds pred = exp(p[1, i])
@inbounds ∇[1, i] = (pred - y[i]) * ∇[3, i]
@inbounds ∇[2, i] = pred * ∇[3, i]
end
end
# Gamma
function update_grads!(∇::Matrix, p::Matrix, y::Vector, ::EvoTreeRegressor{L}) where {L<:Gamma}
@threads for i in eachindex(y)
@inbounds pred = exp(p[1, i])
@inbounds ∇[1, i] = 2 * (1 - y[i] / pred) * ∇[3, i]
@inbounds ∇[2, i] = 2 * y[i] / pred * ∇[3, i]
end
end
# Tweedie
function update_grads!(∇::Matrix, p::Matrix, y::Vector, ::EvoTreeRegressor{L}) where {L<:Tweedie}
rho = eltype(p)(1.5)
@threads for i in eachindex(y)
@inbounds pred = exp(p[1, i])
@inbounds ∇[1, i] = 2 * (pred^(2 - rho) - y[i] * pred^(1 - rho)) * ∇[3, i]
@inbounds ∇[2, i] =
2 * ((2 - rho) * pred^(2 - rho) - (1 - rho) * y[i] * pred^(1 - rho)) * ∇[3, i]
end
end
# L1
function update_grads!(∇::Matrix, p::Matrix, y::Vector, params::EvoTreeRegressor{L}) where {L<:L1}
@threads for i in eachindex(y)
@inbounds ∇[1, i] =
(params.alpha * max(y[i] - p[1, i], 0) - (1 - params.alpha) * max(p[1, i] - y[i], 0)) *
∇[3, i]
end
end
# MLogLoss
function update_grads!(∇::Matrix{T}, p::Matrix, y::Vector, ::EvoTreeClassifier{L}) where {L<:MLogLoss,T}
K = size(p, 1)
@threads for i in eachindex(y)
isum = zero(T)
@inbounds for k = 1:K
isum += exp(p[k, i])
end
@inbounds for k = 1:K
iexp = exp(p[k, i])
if k == y[i]
∇[k, i] = (iexp / isum - 1) * ∇[end, i]
else
∇[k, i] = iexp / isum * ∇[end, i]
end
∇[k+K, i] = 1 / isum * (1 - iexp / isum) * ∇[end, i]
end
end
end
# Quantile
function update_grads!(∇::Matrix, p::Matrix, y::Vector, params::EvoTreeRegressor{L}) where {L<:Quantile}
@threads for i in eachindex(y)
@inbounds ∇[1, i] = y[i] > p[1, i] ? params.alpha * ∇[3, i] : (params.alpha - 1) * ∇[3, i]
@inbounds ∇[2, i] = y[i] - p[1, i] # δ² serves to calculate the quantile value - hence no weighting on δ²
end
end
# Gaussian - http://jrmeyer.github.io/machinelearning/2017/08/18/mle.html
# pred[i][1] = μ
# pred[i][2] = log(σ)
function update_grads!(∇::Matrix, p::Matrix, y::Vector, ::Union{EvoTreeGaussian{L},EvoTreeMLE{L}}) where {L<:GaussianMLE}
@threads for i in eachindex(y)
# first order
@inbounds ∇[1, i] = (p[1, i] - y[i]) / exp(2 * p[2, i]) * ∇[5, i]
@inbounds ∇[2, i] = (1 - (p[1, i] - y[i])^2 / exp(2 * p[2, i])) * ∇[5, i]
# second order
@inbounds ∇[3, i] = ∇[5, i] / exp(2 * p[2, i])
@inbounds ∇[4, i] = ∇[5, i] * 2 / exp(2 * p[2, i]) * (p[1, i] - y[i])^2
end
end
# LogisticProb - https://en.wikipedia.org/wiki/Logistic_distribution
# pdf(y; μ, s) = exp(-(y - μ) / s) / (s * (1 + exp(-(y - μ) / s))^2)
# pred[i][1] = μ
# pred[i][2] = log(s)
function update_grads!(∇::Matrix, p::Matrix, y::Vector, ::EvoTreeMLE{L}) where {L<:LogisticMLE}
@threads for i in eachindex(y)
# first order
@inbounds ∇[1, i] =
-tanh((y[i] - p[1, i]) / (2 * exp(p[2, i]))) * exp(-p[2, i]) * ∇[5, i]
@inbounds ∇[2, i] =
-(
exp(-p[2, i]) *
(y[i] - p[1, i]) *
tanh((y[i] - p[1, i]) / (2 * exp(p[2, i]))) - 1
) * ∇[5, i]
# second order
@inbounds ∇[3, i] =
sech((y[i] - p[1, i]) / (2 * exp(p[2, i])))^2 / (2 * exp(2 * p[2, i])) *
∇[5, i]
@inbounds ∇[4, i] =
(
exp(-2 * p[2, i]) *
(p[1, i] - y[i]) *
(p[1, i] - y[i] + exp(p[2, i]) * sinh(exp(-p[2, i]) * (p[1, i] - y[i])))
) / (1 + cosh(exp(-p[2, i]) * (p[1, i] - y[i]))) * ∇[5, i]
end
end
# utility functions
function logit(x::AbstractArray{T}) where {T<:AbstractFloat}
return logit.(x)
end
@inline function logit(x::T) where {T<:AbstractFloat}
@fastmath log(x / (1 - x))
end
function sigmoid(x::AbstractArray{T}) where {T<:AbstractFloat}
return sigmoid.(x)
end
@inline function sigmoid(x::T) where {T<:AbstractFloat}
@fastmath 1 / (1 + exp(-x))
end
##############################
# get the gain metric
##############################
# GradientRegression
function get_gain(params::EvoTypes{L}, ∑::AbstractVector) where {L<:GradientRegression}
ϵ = eps(eltype(∑))
∑[1]^2 / max(ϵ, (∑[2] + params.lambda * ∑[3] + params.L2)) / 2
end
# GaussianRegression
function get_gain(params::EvoTypes{L}, ∑::AbstractVector) where {L<:MLE2P}
ϵ = eps(eltype(∑))
(∑[1]^2 / max(ϵ, (∑[3] + params.lambda * ∑[5] + params.L2)) + ∑[2]^2 / max(ϵ, (∑[4] + params.lambda * ∑[5] + params.L2))) / 2
end
# MultiClassRegression
function get_gain(params::EvoTypes{L}, ∑::AbstractVector{T}) where {L<:MLogLoss,T}
ϵ = eps(eltype(∑))
gain = zero(T)
K = (length(∑) - 1) ÷ 2
@inbounds for k = 1:K
gain += ∑[k]^2 / max(ϵ, (∑[k+K] + params.lambda * ∑[end] + params.L2)) / 2
end
return gain
end
# Quantile
function get_gain(::EvoTypes{L}, ∑::AbstractVector) where {L<:Quantile}
abs(∑[1])
end
# L1
function get_gain(::EvoTypes{L}, ∑::AbstractVector) where {L<:L1}
abs(∑[1])
end
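# Worked example (hypothetical numbers): for a gradient-regression node with
# ∑ = [g, h, w] = [2.0, 4.0, 10.0], lambda = 0.1 and L2 = 0.0:
#   gain = 2.0^2 / (4.0 + 0.1 * 10.0 + 0.0) / 2 = 0.4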
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 14245 | abstract type ModelType end
abstract type GradientRegression <: ModelType end
abstract type MLE2P <: ModelType end # 2-parameters max-likelihood
abstract type MSE <: GradientRegression end
abstract type LogLoss <: GradientRegression end
abstract type Poisson <: GradientRegression end
abstract type Gamma <: GradientRegression end
abstract type Tweedie <: GradientRegression end
abstract type MLogLoss <: ModelType end
abstract type GaussianMLE <: MLE2P end
abstract type LogisticMLE <: MLE2P end
abstract type Quantile <: ModelType end
abstract type L1 <: ModelType end
# Converts MSE -> :mse
const _type2loss_dict = Dict(
MSE => :mse,
LogLoss => :logloss,
Poisson => :poisson,
Gamma => :gamma,
Tweedie => :tweedie,
MLogLoss => :mlogloss,
GaussianMLE => :gaussian_mle,
LogisticMLE => :logistic_mle,
Quantile => :quantile,
L1 => :l1,
)
_type2loss(L::Type) = _type2loss_dict[L]
# make a Random Number Generator object
mk_rng(rng::AbstractRNG) = rng
mk_rng(int::Integer) = Random.MersenneTwister(int)
mutable struct EvoTreeRegressor{L<:ModelType} <: MMI.Deterministic
nrounds::Int
L2::Float64
lambda::Float64
gamma::Float64
eta::Float64
max_depth::Int
min_weight::Float64 # real minimum number of observations, different from xgboost (but same for linear)
rowsample::Float64 # subsample
colsample::Float64
nbins::Int
alpha::Float64
monotone_constraints::Any
tree_type::String
rng::Any
end
function EvoTreeRegressor(; kwargs...)
# defaults arguments
args = Dict{Symbol,Any}(
:loss => :mse,
:nrounds => 100,
:L2 => 0.0,
:lambda => 0.0,
:gamma => 0.0, # min gain to split
:eta => 0.1, # learning rate
:max_depth => 6,
:min_weight => 1.0, # minimal weight, different from xgboost (but same for linear)
:rowsample => 1.0,
:colsample => 1.0,
:nbins => 64,
:alpha => 0.5,
:monotone_constraints => Dict{Int,Int}(),
:tree_type => "binary",
:rng => 123,
)
args_override = intersect(keys(args), keys(kwargs))
for arg in args_override
args[arg] = kwargs[arg]
end
args[:rng] = mk_rng(args[:rng])
args[:loss] = Symbol(args[:loss])
if args[:loss] == :mse
L = MSE
elseif args[:loss] == :linear
L = MSE
elseif args[:loss] == :logloss
L = LogLoss
elseif args[:loss] == :logistic
L = LogLoss
elseif args[:loss] == :gamma
L = Gamma
elseif args[:loss] == :tweedie
L = Tweedie
elseif args[:loss] == :l1
L = L1
elseif args[:loss] == :quantile
L = Quantile
else
error(
"Invalid loss: $(args[:loss]). Only [`:mse`, `:logloss`, `:gamma`, `:tweedie`, `:l1`, `:quantile`] are supported by EvoTreeRegressor.",
)
end
check_args(args)
model = EvoTreeRegressor{L}(
args[:nrounds],
args[:L2],
args[:lambda],
args[:gamma],
args[:eta],
args[:max_depth],
args[:min_weight],
args[:rowsample],
args[:colsample],
args[:nbins],
args[:alpha],
args[:monotone_constraints],
args[:tree_type],
args[:rng],
)
return model
end
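# Usage sketch (hypothetical values): keyword arguments override the defaults and
# the `loss` symbol selects the parametric type, e.g.
#   config = EvoTreeRegressor(loss=:logloss, eta=0.05, nrounds=200)
#   typeof(config)   # EvoTreeRegressor{LogLoss}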
function EvoTreeRegressor{L}(; kwargs...) where {L}
EvoTreeRegressor(; loss=_type2loss(L), kwargs...)
end
mutable struct EvoTreeCount{L<:ModelType} <: MMI.Probabilistic
nrounds::Int
L2::Float64
lambda::Float64
gamma::Float64
eta::Float64
max_depth::Int
min_weight::Float64 # real minimum number of observations, different from xgboost (but same for linear)
rowsample::Float64 # subsample
colsample::Float64
nbins::Int
alpha::Float64
monotone_constraints::Any
tree_type::String
rng::Any
end
function EvoTreeCount(; kwargs...)
# defaults arguments
args = Dict{Symbol,Any}(
:nrounds => 100,
:L2 => 0.0,
:lambda => 0.0,
:gamma => 0.0, # min gain to split
:eta => 0.1, # learning rate
:max_depth => 6,
:min_weight => 1.0, # minimal weight, different from xgboost (but same for linear)
:rowsample => 1.0,
:colsample => 1.0,
:nbins => 64,
:alpha => 0.5,
:monotone_constraints => Dict{Int,Int}(),
:tree_type => "binary",
:rng => 123,
)
args_override = intersect(keys(args), keys(kwargs))
for arg in args_override
args[arg] = kwargs[arg]
end
args[:rng] = mk_rng(args[:rng])
L = Poisson
check_args(args)
model = EvoTreeCount{L}(
args[:nrounds],
args[:L2],
args[:lambda],
args[:gamma],
args[:eta],
args[:max_depth],
args[:min_weight],
args[:rowsample],
args[:colsample],
args[:nbins],
args[:alpha],
args[:monotone_constraints],
args[:tree_type],
args[:rng],
)
return model
end
function EvoTreeCount{L}(; kwargs...) where {L}
EvoTreeCount(; kwargs...)
end
mutable struct EvoTreeClassifier{L<:ModelType} <: MMI.Probabilistic
nrounds::Int
L2::Float64
lambda::Float64
gamma::Float64
eta::Float64
max_depth::Int
min_weight::Float64 # real minimum number of observations, different from xgboost (but same for linear)
rowsample::Float64 # subsample
colsample::Float64
nbins::Int
alpha::Float64
tree_type::String
rng::Any
end
function EvoTreeClassifier(; kwargs...)
# defaults arguments
args = Dict{Symbol,Any}(
:nrounds => 100,
:L2 => 0.0,
:lambda => 0.0,
:gamma => 0.0, # min gain to split
:eta => 0.1, # learning rate
:max_depth => 6,
:min_weight => 1.0, # minimal weight, different from xgboost (but same for linear)
:rowsample => 1.0,
:colsample => 1.0,
:nbins => 64,
:alpha => 0.5,
:tree_type => "binary",
:rng => 123,
)
args_override = intersect(keys(args), keys(kwargs))
for arg in args_override
args[arg] = kwargs[arg]
end
args[:rng] = mk_rng(args[:rng])
L = MLogLoss
check_args(args)
model = EvoTreeClassifier{L}(
args[:nrounds],
args[:L2],
args[:lambda],
args[:gamma],
args[:eta],
args[:max_depth],
args[:min_weight],
args[:rowsample],
args[:colsample],
args[:nbins],
args[:alpha],
args[:tree_type],
args[:rng],
)
return model
end
function EvoTreeClassifier{L}(; kwargs...) where {L}
EvoTreeClassifier(; kwargs...)
end
mutable struct EvoTreeMLE{L<:ModelType} <: MMI.Probabilistic
nrounds::Int
L2::Float64
lambda::Float64
gamma::Float64
eta::Float64
max_depth::Int
min_weight::Float64 # real minimum number of observations, different from xgboost (but same for linear)
rowsample::Float64 # subsample
colsample::Float64
nbins::Int
alpha::Float64
monotone_constraints::Any
tree_type::String
rng::Any
end
function EvoTreeMLE(; kwargs...)
# defaults arguments
args = Dict{Symbol,Any}(
:loss => :gaussian_mle,
:nrounds => 100,
:L2 => 0.0,
:lambda => 0.0,
:gamma => 0.0, # min gain to split
:eta => 0.1, # learning rate
:max_depth => 6,
:min_weight => 8.0, # minimal weight, different from xgboost (but same for linear)
:rowsample => 1.0,
:colsample => 1.0,
:nbins => 64,
:alpha => 0.5,
:monotone_constraints => Dict{Int,Int}(),
:tree_type => "binary",
:rng => 123,
)
args_override = intersect(keys(args), keys(kwargs))
for arg in args_override
args[arg] = kwargs[arg]
end
args[:rng] = mk_rng(args[:rng])
args[:loss] = Symbol(args[:loss])
if args[:loss] in [:gaussian, :gaussian_mle]
L = GaussianMLE
elseif args[:loss] in [:logistic, :logistic_mle]
L = LogisticMLE
else
error(
"Invalid loss: $(args[:loss]). Only `:gaussian_mle` and `:logistic_mle` are supported by EvoTreeMLE.",
)
end
check_args(args)
model = EvoTreeMLE{L}(
args[:nrounds],
args[:L2],
args[:lambda],
args[:gamma],
args[:eta],
args[:max_depth],
args[:min_weight],
args[:rowsample],
args[:colsample],
args[:nbins],
args[:alpha],
args[:monotone_constraints],
args[:tree_type],
args[:rng],
)
return model
end
function EvoTreeMLE{L}(; kwargs...) where {L}
if L == GaussianMLE
loss = :gaussian_mle
elseif L == LogisticMLE
loss = :logistic_mle
end
EvoTreeMLE(; loss=loss, kwargs...)
end
mutable struct EvoTreeGaussian{L<:ModelType} <: MMI.Probabilistic
nrounds::Int
L2::Float64
lambda::Float64
gamma::Float64
eta::Float64
max_depth::Int
min_weight::Float64 # real minimum number of observations, different from xgboost (but same for linear)
rowsample::Float64 # subsample
colsample::Float64
nbins::Int
alpha::Float64
monotone_constraints::Any
tree_type::String
rng::Any
end
function EvoTreeGaussian(; kwargs...)
# defaults arguments
args = Dict{Symbol,Any}(
:nrounds => 100,
:L2 => 0.0,
:lambda => 0.0,
:gamma => 0.0, # min gain to split
:eta => 0.1, # learning rate
:max_depth => 6,
:min_weight => 8.0, # minimal weight, different from xgboost (but same for linear)
:rowsample => 1.0,
:colsample => 1.0,
:nbins => 64,
:alpha => 0.5,
:monotone_constraints => Dict{Int,Int}(),
:tree_type => "binary",
:rng => 123,
)
args_override = intersect(keys(args), keys(kwargs))
for arg in args_override
args[arg] = kwargs[arg]
end
args[:rng] = mk_rng(args[:rng])
L = GaussianMLE
check_args(args)
model = EvoTreeGaussian{L}(
args[:nrounds],
args[:L2],
args[:lambda],
args[:gamma],
args[:eta],
args[:max_depth],
args[:min_weight],
args[:rowsample],
args[:colsample],
args[:nbins],
args[:alpha],
args[:monotone_constraints],
args[:tree_type],
args[:rng],
)
return model
end
function EvoTreeGaussian{L}(; kwargs...) where {L}
EvoTreeGaussian(; kwargs...)
end
const EvoTypes{L} = Union{
EvoTreeRegressor{L},
EvoTreeCount{L},
EvoTreeClassifier{L},
EvoTreeGaussian{L},
EvoTreeMLE{L},
}
_get_struct_loss(::EvoTypes{L}) where {L} = L
function Base.show(io::IO, config::EvoTypes)
println(io, "$(typeof(config))")
for fname in fieldnames(typeof(config))
println(io, " - $fname: $(getfield(config, fname))")
end
end
"""
check_parameter(::Type{<:T}, value, min_value::Real, max_value::Real, label::Symbol) where {T<:Number}
Check that a model parameter has the expected type `T` and lies within `[min_value, max_value]`.
"""
function check_parameter(::Type{<:T}, value, min_value::Real, max_value::Real, label::Symbol) where {T<:Number}
min_value = max(typemin(T), min_value)
max_value = min(typemax(T), max_value)
try
convert(T, value)
@assert min_value <= value <= max_value
catch
error("Invalid value for parameter `$(string(label))`: $value. `$(string(label))` must be of type $T with value between $min_value and $max_value.")
end
end
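# Example (assumed behavior, mirrored by the test suite):
# check_parameter(Float64, 0.5, 0.0, 1.0, :alpha)      # valid -> returns nothing
# check_parameter(Int, -5, 0, typemax(Int), :nrounds)  # out of range -> throws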
"""
check_args(args::Dict{Symbol,Any})
Check that the model arguments dict holds valid values.
"""
function check_args(args::Dict{Symbol,Any})
# Check integer parameters
check_parameter(Int, args[:nrounds], 0, typemax(Int), :nrounds)
check_parameter(Int, args[:max_depth], 1, typemax(Int), :max_depth)
check_parameter(Int, args[:nbins], 2, 255, :nbins)
# check positive float parameters
check_parameter(Float64, args[:lambda], zero(Float64), typemax(Float64), :lambda)
check_parameter(Float64, args[:gamma], zero(Float64), typemax(Float64), :gamma)
check_parameter(Float64, args[:min_weight], zero(Float64), typemax(Float64), :min_weight)
# check bounded parameters
check_parameter(Float64, args[:alpha], zero(Float64), one(Float64), :alpha)
check_parameter(Float64, args[:rowsample], eps(Float64), one(Float64), :rowsample)
check_parameter(Float64, args[:colsample], eps(Float64), one(Float64), :colsample)
check_parameter(Float64, args[:eta], zero(Float64), typemax(Float64), :eta)
try
tree_type = string(args[:tree_type])
@assert tree_type ∈ ["binary", "oblivious"]
catch
error("Invalid input for `tree_type` parameter: `$(args[:tree_type])`. Must be of one of `binary` or `oblivious`")
end
end
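# Example (assumed): the same checks run on the kwargs dict at construction time,
# so e.g. EvoTreeRegressor(nbins=256) throws, since nbins must lie in [2, 255].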
"""
check_args(model::EvoTypes{L}) where {L}
Check that model arguments are valid (e.g. after mutation when tuning hyperparameters).
Note: does not check consistency between the model type and the selected loss.
"""
function check_args(model::EvoTypes{L}) where {L}
# Check integer parameters
check_parameter(Int, model.max_depth, 1, typemax(Int), :max_depth)
check_parameter(Int, model.nrounds, 0, typemax(Int), :nrounds)
check_parameter(Int, model.nbins, 2, 255, :nbins)
# check positive float parameters
check_parameter(Float64, model.lambda, zero(Float64), typemax(Float64), :lambda)
check_parameter(Float64, model.gamma, zero(Float64), typemax(Float64), :gamma)
check_parameter(Float64, model.min_weight, zero(Float64), typemax(Float64), :min_weight)
# check bounded parameters
check_parameter(Float64, model.alpha, zero(Float64), one(Float64), :alpha)
check_parameter(Float64, model.rowsample, eps(Float64), one(Float64), :rowsample)
check_parameter(Float64, model.colsample, eps(Float64), one(Float64), :colsample)
check_parameter(Float64, model.eta, zero(Float64), typemax(Float64), :eta)
try
tree_type = string(model.tree_type)
@assert tree_type ∈ ["binary", "oblivious"]
catch
error("Invalid input for `tree_type` parameter: `$(model.tree_type)`. Must be of one of `binary` or `oblivious`")
end
end | EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 4507 | function get_adj_list(tree::EvoTrees.Tree)
n = 1
map = ones(Int, 1)
adj = Vector{Vector{Int}}()
if tree.split[1]
push!(adj, [n + 1, n + 2])
n += 2
else
push!(adj, [])
end
for i = 2:length(tree.split)
if tree.split[i]
push!(map, i)
push!(adj, [n + 1, n + 2])
n += 2
elseif tree.split[i>>1]
push!(map, i)
push!(adj, [])
end
end
return (map=map, adj=adj)
end
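# Worked example: for a tree where only the root splits, the root maps to source
# node 1 and its children to nodes 2 and 3, giving:
# map == [1, 2, 3] and adj == [[2, 3], [], []]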
function get_shapes(tree_layout)
shapes = Vector(undef, length(tree_layout))
for i = eachindex(tree_layout)
x, y = tree_layout[i][1], tree_layout[i][2] # center point
x_buff = 0.45
y_buff = 0.45
shapes[i] = [
(x - x_buff, y + y_buff),
(x + x_buff, y + y_buff),
(x + x_buff, y - y_buff),
(x - x_buff, y - y_buff),
]
end
return shapes
end
function get_annotations(tree_layout, map, tree, var_names)
# annotations = Vector{Tuple{Float64, Float64, String, Tuple}}(undef, length(tree_layout))
annotations = []
for i = eachindex(tree_layout)
x, y = tree_layout[i][1], tree_layout[i][2] # center point
if tree.split[map[i]]
feat =
isnothing(var_names) ? "feat: " * string(tree.feat[map[i]]) :
var_names[tree.feat[map[i]]]
txt = "$feat\n" * string(round(tree.cond_float[map[i]], sigdigits=3))
else
txt = "pred:\n" * string(round(tree.pred[1, map[i]], sigdigits=3))
end
# annotations[i] = (x, y, txt, (9, :white, "helvetica"))
push!(annotations, (x, y, txt, 10))
end
return annotations
end
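# Edges connect the bottom edge of a parent box (shapes[i][3][2]) to the top
# edge of each child box (shapes[child][1][2]), one curve per parent-child pair.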
function get_curves(adj, tree_layout, shapes)
curves = []
num_curves = sum(length.(adj))
for i = eachindex(adj)
for j = eachindex(adj[i])
# curves is a length 2 tuple: (vector Xs, vector Ys)
push!(
curves,
(
[tree_layout[i][1], tree_layout[adj[i][j]][1]],
[shapes[i][3][2], shapes[adj[i][j]][1][2]],
),
)
end
end
return curves
end
@recipe function plot(tree::EvoTrees.Tree, var_names=nothing)
map, adj = EvoTrees.get_adj_list(tree)
tree_layout = length(adj) == 1 ? [[0.0, 0.0]] : NetworkLayout.buchheim(adj)
shapes = EvoTrees.get_shapes(tree_layout) # issue with Shape coming from Plots... to be converted to Shape in Recipe?
annotations = EvoTrees.get_annotations(tree_layout, map, tree, var_names) # same with Plots.text
curves = EvoTrees.get_curves(adj, tree_layout, shapes)
size_base = floor(log2(length(adj)))
size = (128 * 2^size_base, 96 * (1 + size_base))
background_color --> :white
linecolor --> :black
legend --> nothing
axis --> nothing
framestyle --> :none
size --> size
annotations --> annotations
for i = eachindex(shapes)
@series begin
fillcolor = length(adj[i]) == 0 ? "#84DCC6" : "#C8D3D5"
fillcolor --> fillcolor
seriestype --> :shape
return shapes[i]
end
end
for i = eachindex(curves)
@series begin
seriestype --> :curves
return curves[i]
end
end
end
@recipe function plot(model::EvoTrees.EvoTree, n=1; var_names=model.info[:fnames])
tree = model.trees[n]
map, adj = EvoTrees.get_adj_list(tree)
tree_layout = length(adj) == 1 ? [[0.0, 0.0]] : NetworkLayout.buchheim(adj)
shapes = EvoTrees.get_shapes(tree_layout) # issue with Shape coming from Plots... to be converted o Shape in Receipe?
annotations = EvoTrees.get_annotations(tree_layout, map, tree, var_names) # same with Plots.text
curves = EvoTrees.get_curves(adj, tree_layout, shapes)
size_base = floor(log2(length(adj)))
size = (128 * 2^size_base, 96 * (1 + size_base))
background_color --> :white
linecolor --> :black
legend --> nothing
axis --> nothing
framestyle --> :none
size --> size
annotations --> annotations
for i = eachindex(shapes)
@series begin
fillcolor = length(adj[i]) == 0 ? "#84DCC6" : "#C8D3D5"
fillcolor --> fillcolor
seriestype --> :shape
return shapes[i]
end
end
for i = eachindex(curves)
@series begin
seriestype --> :curves
return curves[i]
end
end
end | EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 6020 | function predict!(pred::Matrix{T}, tree::Tree{L,K}, x_bin::Matrix{UInt8}, feattypes::Vector{Bool}) where {L<:GradientRegression,K,T}
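# Tree navigation: nodes live in a flat array with the root at index 1; the left
# child of node `nid` is 2*nid and the right child 2*nid + 1, hence `nid << 1 + !cond`
# below (in Julia, `<<` binds tighter than `+`).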
@threads for i in axes(x_bin, 1)
nid = 1
@inbounds while tree.split[nid]
feat = tree.feat[nid]
cond = feattypes[feat] ? x_bin[i, feat] <= tree.cond_bin[nid] : x_bin[i, feat] == tree.cond_bin[nid]
nid = nid << 1 + !cond
end
@inbounds pred[1, i] += tree.pred[1, nid]
end
return nothing
end
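# LogLoss specialization: accumulated raw scores are clamped to [-15, 15] so the
# sigmoid applied at inference time stays numerically stable.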
function predict!(pred::Matrix{T}, tree::Tree{L,K}, x_bin::Matrix{UInt8}, feattypes::Vector{Bool}) where {L<:LogLoss,K,T}
@threads for i in axes(x_bin, 1)
nid = 1
@inbounds while tree.split[nid]
feat = tree.feat[nid]
cond = feattypes[feat] ? x_bin[i, feat] <= tree.cond_bin[nid] : x_bin[i, feat] == tree.cond_bin[nid]
nid = nid << 1 + !cond
end
@inbounds pred[1, i] = clamp(pred[1, i] + tree.pred[1, nid], T(-15), T(15))
end
return nothing
end
function predict!(pred::Matrix{T}, tree::Tree{L,K}, x_bin::Matrix{UInt8}, feattypes::Vector{Bool}) where {L<:MLE2P,K,T}
@threads for i in axes(x_bin, 1)
nid = 1
@inbounds while tree.split[nid]
feat = tree.feat[nid]
cond = feattypes[feat] ? x_bin[i, feat] <= tree.cond_bin[nid] : x_bin[i, feat] == tree.cond_bin[nid]
nid = nid << 1 + !cond
end
@inbounds pred[1, i] += tree.pred[1, nid]
@inbounds pred[2, i] = max(T(-15), pred[2, i] + tree.pred[2, nid])
end
return nothing
end
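# MLogLoss specialization: subtracting the per-row maximum keeps logits in a
# numerically safe range ahead of the softmax applied in `predict`.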
function predict!(pred::Matrix{T}, tree::Tree{L,K}, x_bin::Matrix{UInt8}, feattypes::Vector{Bool}) where {L<:MLogLoss,K,T}
@threads for i in axes(x_bin, 1)
nid = 1
@inbounds while tree.split[nid]
feat = tree.feat[nid]
cond = feattypes[feat] ? x_bin[i, feat] <= tree.cond_bin[nid] : x_bin[i, feat] == tree.cond_bin[nid]
nid = nid << 1 + !cond
end
@inbounds for k = 1:K
pred[k, i] += tree.pred[k, nid]
end
@views pred[:, i] .= max.(T(-15), pred[:, i] .- maximum(pred[:, i]))
end
return nothing
end
"""
predict!(pred::Matrix, tree::Tree, x_bin::Matrix{UInt8}, feattypes::Vector{Bool})
Generic fallback that adds the predictions of `tree` to the existing `pred` matrix.
"""
function predict!(pred::Matrix{T}, tree::Tree{L,K}, x_bin::Matrix{UInt8}, feattypes::Vector{Bool}) where {L,K,T}
@threads for i in axes(x_bin, 1)
nid = 1
@inbounds while tree.split[nid]
feat = tree.feat[nid]
cond = feattypes[feat] ? x_bin[i, feat] <= tree.cond_bin[nid] : x_bin[i, feat] == tree.cond_bin[nid]
nid = nid << 1 + !cond
end
@inbounds for k = 1:K
pred[k, i] += tree.pred[k, nid]
end
end
return nothing
end
"""
predict(m::EvoTree, data, device::Type{<:Device}=CPU; ntree_limit=length(m.trees))
Predictions from an EvoTree model - sums the predictions from all trees composing the model.
Use `ntree_limit=N` to only predict with the first `N` trees.
"""
function predict(
m::EvoTree{L,K},
data,
::Type{<:Device}=CPU;
ntree_limit=length(m.trees)) where {L,K}
Tables.istable(data) ? data = Tables.columntable(data) : nothing
ntrees = length(m.trees)
ntree_limit > ntrees && error("ntree_limit is larger than number of trees $ntrees.")
x_bin = binarize(data; fnames=m.info[:fnames], edges=m.info[:edges])
nobs = size(x_bin, 1)
pred = zeros(Float32, K, nobs)
for i = 1:ntree_limit
predict!(pred, m.trees[i], x_bin, m.info[:feattypes])
end
if L == LogLoss
pred .= sigmoid.(pred)
elseif L ∈ [Poisson, Gamma, Tweedie]
pred .= exp.(pred)
elseif L in [GaussianMLE, LogisticMLE]
pred[2, :] .= exp.(pred[2, :])
elseif L == MLogLoss
softmax!(pred)
end
pred = K == 1 ? vec(Array(pred')) : Array(pred')
return pred
end
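# Usage sketch (assumed data; `fit_evotree` as used throughout the test suite):
# m = fit_evotree(EvoTreeRegressor(nrounds=100); x_train, y_train)
# p_full = predict(m, x_train)                   # sums over all trees
# p_early = predict(m, x_train; ntree_limit=10)  # first 10 trees only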
function softmax!(p::AbstractMatrix)
@threads for i in axes(p, 2)
_p = view(p, :, i)
_p .= exp.(_p)
isum = sum(_p)
_p ./= isum
end
return nothing
end
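# Minimal softmax! check: each column should sum to 1 after the in-place transform.
# p = randn(3, 5); softmax!(p); all(x -> x ≈ 1.0, sum(p, dims=1))
# prediction in Leaf - GradientRegression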
function pred_leaf_cpu!(p::Matrix, n, ∑::AbstractVector{T}, params::EvoTypes{L}, ∇, is) where {L<:GradientRegression,T}
ϵ = eps(T)
p[1, n] = -params.eta * ∑[1] / max(ϵ, (∑[2] + params.lambda * ∑[3] + params.L2))
end
function pred_scalar(∑::AbstractVector{T}, params::EvoTypes{L}) where {L<:GradientRegression,T}
ϵ = eps(T)
-params.eta * ∑[1] / max(ϵ, (∑[2] + params.lambda * ∑[3] + params.L2))
end
# prediction in Leaf - MLE2P
function pred_leaf_cpu!(p::Matrix, n, ∑::AbstractVector{T}, params::EvoTypes{L}, ∇, is) where {L<:MLE2P,T}
ϵ = eps(T)
p[1, n] = -params.eta * ∑[1] / max(ϵ, (∑[3] + params.lambda * ∑[5] + params.L2))
p[2, n] = -params.eta * ∑[2] / max(ϵ, (∑[4] + params.lambda * ∑[5] + params.L2))
end
function pred_scalar(∑::AbstractVector{T}, params::EvoTypes{L}) where {L<:MLE2P,T}
ϵ = eps(T)
-params.eta * ∑[1] / max(ϵ, (∑[3] + params.lambda * ∑[5] + params.L2))
end
# prediction in Leaf - MultiClassRegression
function pred_leaf_cpu!(p::Matrix, n, ∑::AbstractVector{T}, params::EvoTypes{L}, ∇, is) where {L<:MLogLoss,T}
ϵ = eps(T)
K = size(p, 1)
@inbounds for k = axes(p, 1)
p[k, n] = -params.eta * ∑[k] / max(ϵ, (∑[k+K] + params.lambda * ∑[end] + params.L2))
end
end
# prediction in Leaf - Quantile
function pred_leaf_cpu!(p::Matrix, n, ∑::AbstractVector{T}, params::EvoTypes{L}, ∇, is) where {L<:Quantile,T}
p[1, n] = params.eta * quantile(∇[2, is], params.alpha) / (1 + params.lambda + params.L2)
end
# prediction in Leaf - L1
function pred_leaf_cpu!(p::Matrix, n, ∑::AbstractVector{T}, params::EvoTypes{L}, ∇, is) where {L<:L1,T}
ϵ = eps(T)
p[1, n] = params.eta * ∑[1] / max(ϵ, (∑[3] * (1 + params.lambda + params.L2)))
end
function pred_scalar(∑::AbstractVector{T}, params::EvoTypes{L1}) where {T}
ϵ = eps(T)
params.eta * ∑[1] / max(ϵ, (∑[3] * (1 + params.lambda + params.L2)))
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2695 | abstract type Device end
abstract type CPU <: Device end
abstract type GPU <: Device end
"""
TrainNode{S,V,M}
Carries training information for a given tree node
"""
mutable struct TrainNode{S,V,M}
gain::Float64
is::S
∑::V
h::Vector{M}
hL::Vector{M}
hR::Vector{M}
gains::Vector{V}
end
function TrainNode(featbins, K, is)
node = TrainNode(
zero(Float64),
is,
zeros(2 * K + 1),
[zeros(2 * K + 1, nbins) for nbins in featbins],
[zeros(2 * K + 1, nbins) for nbins in featbins],
[zeros(2 * K + 1, nbins) for nbins in featbins],
[zeros(nbins) for nbins in featbins],
)
return node
end
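# Sketch (hypothetical dimensions): a node tracking 3 features binned into
# 32/32/16 bins, with a single output dimension (K = 1):
# node = TrainNode([32, 32, 16], 1, 1:100)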
# single tree is made of a vectors of length num nodes
struct Tree{L,K}
feat::Vector{Int}
cond_bin::Vector{UInt8}
cond_float::Vector{Any}
gain::Vector{Float64}
pred::Matrix{Float32}
split::Vector{Bool}
end
function Tree{L,K}(x::Vector) where {L,K}
Tree{L,K}(
zeros(Int, 1),
zeros(UInt8, 1),
zeros(Float64, 1),
zeros(Float64, 1),
reshape(x, :, 1),
zeros(Bool, 1),
)
end
function Tree{L,K}(depth::Int) where {L,K}
Tree{L,K}(
zeros(Int, 2^depth - 1),
zeros(UInt8, 2^depth - 1),
zeros(Float64, 2^depth - 1),
zeros(Float64, 2^depth - 1),
zeros(Float32, K, 2^depth - 1),
zeros(Bool, 2^depth - 1),
)
end
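# Example: Tree{MSE,1}(3) pre-allocates 2^3 - 1 = 7 node slots, i.e. a full
# binary tree of depth 3 stored in breadth-first order.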
function Base.show(io::IO, tree::Tree)
println(io, "$(typeof(tree))")
for fname in fieldnames(typeof(tree))
println(io, " - $fname: $(getfield(tree, fname))")
end
end
"""
EvoTree{L,K}
An `EvoTree` holds the structure of a fitted gradient-boosted tree.
# Fields
- trees::Vector{Tree{L,K}}
- info::Dict
`EvoTree` acts as a functor to perform inference on input data:
```
pred = m(x; ntree_limit=length(m.trees))
```
"""
struct EvoTree{L,K}
trees::Vector{Tree{L,K}}
info::Dict
end
# (m::EvoTree)(data, device::Type{D}=CPU; ntree_limit=length(m.trees)) where {D<:Device} =
# predict(m, data, device; ntree_limit)
function (m::EvoTree)(data; ntree_limit=length(m.trees), device="cpu")
@assert string(device) ∈ ["cpu", "gpu"]
_device = string(device) == "cpu" ? CPU : GPU
return predict(m, data, _device; ntree_limit)
end
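# Usage sketch: calling the model is assumed equivalent to predict(m, data, CPU):
# pred = m(x_eval; ntree_limit=50, device="cpu")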
_get_struct_loss(::EvoTree{L,K}) where {L,K} = L
function Base.show(io::IO, evotree::EvoTree)
println(io, "$(typeof(evotree))")
println(io, " - Contains $(length(evotree.trees)) trees in field `trees` (incl. 1 bias tree).")
println(io, " - Data input has $(length(evotree.info[:fnames])) features.")
println(io, " - $(keys(evotree.info)) info accessible in field `info`")
end | EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 1314 | """
subsample(is_in::AbstractVector, is_out::AbstractVector, mask::AbstractVector, rowsample::AbstractFloat, rng)
Returns a view into `is_out` holding the ids of the selected rows.
"""
function subsample(is_in::AbstractVector, is_out::AbstractVector, mask::AbstractVector, rowsample::AbstractFloat, rng)
Random.rand!(rng, mask)
cond = round(UInt8, 255 * rowsample)
chunk_size = cld(length(is_in), min(cld(length(is_in), 1024), Threads.nthreads()))
nblocks = cld(length(is_in), chunk_size)
counts = zeros(Int, nblocks)
@threads for bid = 1:nblocks
i_start = chunk_size * (bid - 1) + 1
i_stop = bid == nblocks ? length(is_in) : i_start + chunk_size - 1
count = 0
for i = i_start:i_stop
if mask[i] <= cond
is_in[i_start+count] = i
count += 1
end
end
counts[bid] = count
end
counts_cum = cumsum(counts) .- counts
@threads for bid = 1:nblocks
count_cum = counts_cum[bid]
i_start = chunk_size * (bid - 1)
@inbounds for i = 1:counts[bid]
is_out[count_cum+i] = is_in[i_start+i]
end
end
counts_sum = sum(counts)
if counts_sum == 0
@error "no subsample observation - choose larger rowsample"
else
return view(is_out, 1:counts_sum)
end
end | EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 11063 | using StatsBase: sample
using EvoTrees: sigmoid, logit
using MLJBase
using MLJTestInterface
@testset "generic interface tests" begin
@testset "EvoTreeRegressor, EvoTreeMLE, EvoTreeGaussian" begin
failures, summary = MLJTestInterface.test(
[EvoTreeRegressor, EvoTreeMLE, EvoTreeGaussian],
MLJTestInterface.make_regression()...;
mod=@__MODULE__,
verbosity=0, # bump to debug
throw=false # set to true to debug
)
@test isempty(failures)
end
@testset "EvoTreeCount" begin
failures, summary = MLJTestInterface.test(
[EvoTreeCount],
MLJTestInterface.make_count()...;
mod=@__MODULE__,
verbosity=0, # bump to debug
throw=false # set to true to debug
)
@test isempty(failures)
end
@testset "EvoTreeClassifier" begin
for data in [
MLJTestInterface.make_binary(),
MLJTestInterface.make_multiclass(),
]
failures, summary = MLJTestInterface.test(
[EvoTreeClassifier],
data...;
mod=@__MODULE__,
verbosity=0, # bump to debug
throw=false # set to true to debug
)
@test isempty(failures)
end
end
end
##################################################
### Regression - small data
##################################################
features = rand(1_000) .* 5 .- 2
X = reshape(features, (size(features)[1], 1))
Y = sin.(features) .* 0.5 .+ 0.5
Y = logit(Y) + randn(size(Y))
Y = sigmoid(Y)
y = Y
X = MLJBase.table(X)
# @load EvoTreeRegressor
# linear regression
tree_model = EvoTreeRegressor(max_depth=5, eta=0.05, nrounds=10)
# logloss - logistic regression
tree_model = EvoTreeRegressor(loss=:logloss, max_depth=5, eta=0.05, nrounds=10)
# quantile regression
# tree_model = EvoTreeRegressor(loss=:quantile, alpha=0.75, max_depth=5, eta=0.05, nrounds=10)
mach = machine(tree_model, X, y)
train, test = partition(eachindex(y), 0.7, shuffle=true); # 70:30 split
fit!(mach, rows=train, verbosity=1)
mach.model.nrounds += 10
fit!(mach, rows=train, verbosity=1)
# predict on train data
pred_train = predict(mach, selectrows(X, train))
mean(abs.(pred_train - selectrows(Y, train)))
# predict on test data
pred_test = predict(mach, selectrows(X, test))
mean(abs.(pred_test - selectrows(Y, test)))
@test MLJBase.iteration_parameter(EvoTreeRegressor) == :nrounds
##################################################
### Regression - GPU
##################################################
# tree_model = EvoTreeRegressor(loss = :logloss, max_depth = 5, eta = 0.05, nrounds = 10, device = "gpu")
# mach = machine(tree_model, X, y)
# train, test = partition(eachindex(y), 0.7, shuffle = true); # 70:30 split
# fit!(mach, rows = train, verbosity = 1)
# mach.model.nrounds += 10
# fit!(mach, rows = train, verbosity = 1)
# # predict on train data
# pred_train = predict(mach, selectrows(X, train))
# mean(abs.(pred_train - selectrows(Y, train)))
# # predict on test data
# pred_test = predict(mach, selectrows(X, test))
# mean(abs.(pred_test - selectrows(Y, test)))
# @test MLJBase.iteration_parameter(EvoTreeRegressor) == :nrounds
##################################################
### classif - categorical target
##################################################
X, y = @load_crabs
tree_model = EvoTreeClassifier(
max_depth=4,
eta=0.05,
lambda=0.0,
gamma=0.0,
nrounds=10,
)
# @load EvoTreeRegressor
mach = machine(tree_model, X, y)
train, test = partition(eachindex(y), 0.7, shuffle=true); # 70:30 split
fit!(mach, rows=train, verbosity=1);
mach.model.nrounds += 50
fit!(mach, rows=train, verbosity=1)
pred_train = predict(mach, selectrows(X, train))
pred_train_mode = predict_mode(mach, selectrows(X, train))
sum(pred_train_mode .== y[train]) / length(y[train])
pred_test = predict(mach, selectrows(X, test))
pred_test_mode = predict_mode(mach, selectrows(X, test))
sum(pred_test_mode .== y[test]) / length(y[test])
##################################################
### count
##################################################
features = rand(1_000, 10)
# features = rand(100, 10)
X = features
Y = rand(UInt8, size(X, 1))
𝑖 = collect(1:size(X, 1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
X_train, X_eval = X[𝑖_train, :], X[𝑖_eval, :]
Y_train, Y_eval = Y[𝑖_train], Y[𝑖_eval]
# @load EvoTreeRegressor
tree_model = EvoTreeCount(
loss=:poisson,
metric=:poisson,
nrounds=10,
lambda=0.0,
gamma=0.0,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=0.5,
nbins=32,
)
X = MLJBase.table(X)
# typeof(X)
mach = machine(tree_model, X, Y)
train, test = partition(eachindex(Y), 0.8, shuffle=true); # 80:20 split
fit!(mach, rows=train, verbosity=1, force=true)
mach.model.nrounds += 10
fit!(mach, rows=train, verbosity=1)
pred = predict(mach, selectrows(X, train))
pred_mean = predict_mean(mach, selectrows(X, train))
pred_mode = predict_mode(mach, selectrows(X, train))
# pred_mode = predict_median(mach, selectrows(X,train))
##################################################
### Gaussian - Larger data
##################################################
features = rand(1_000, 10)
X = features
Y = rand(size(X, 1))
𝑖 = collect(1:size(X, 1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
X_train, X_eval = X[𝑖_train, :], X[𝑖_eval, :]
Y_train, Y_eval = Y[𝑖_train], Y[𝑖_eval]
# @load EvoTreeRegressor
tree_model = EvoTreeGaussian(
nrounds=10,
lambda=0.0,
gamma=0.0,
eta=0.1,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=0.5,
nbins=32,
)
X = MLJBase.table(X)
# typeof(X)
mach = machine(tree_model, X, Y)
train, test = partition(eachindex(Y), 0.8, shuffle=true); # 80:20 split
fit!(mach, rows=train, verbosity=1, force=true)
mach.model.nrounds += 10
fit!(mach, rows=train, verbosity=1)
pred = predict(mach, selectrows(X, train))
pred_mean = predict_mean(mach, selectrows(X, train))
pred_mode = predict_mode(mach, selectrows(X, train))
# pred_mode = predict_median(mach, selectrows(X,train))
mean(abs.(pred_mean - selectrows(Y, train)))
q_20 = quantile.(pred, 0.20)
q_80 = quantile.(pred, 0.80)
report(mach)
##################################################
### LogLoss - Larger data
##################################################
features = rand(1_000, 10)
X = features
Y = rand(size(X, 1))
𝑖 = collect(1:size(X, 1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
x_train, x_eval = X[𝑖_train, :], X[𝑖_eval, :]
y_train, y_eval = Y[𝑖_train], Y[𝑖_eval]
# @load EvoTreeRegressor
tree_model = EvoTreeMLE(
loss=:logistic_mle,
nrounds=10,
lambda=1.0,
gamma=0.0,
eta=0.1,
max_depth=6,
min_weight=32.0,
rowsample=0.5,
colsample=0.5,
nbins=32,
)
X = MLJBase.table(X)
# typeof(X)
mach = machine(tree_model, X, Y)
train, test = partition(eachindex(Y), 0.8, shuffle=true); # 80:20 split
fit!(mach, rows=train, verbosity=1, force=true)
mach.model.nrounds += 10
fit!(mach, rows=train, verbosity=1)
pred = predict(mach, selectrows(X, train))
pred_mean = predict_mean(mach, selectrows(X, train))
pred_mode = predict_mode(mach, selectrows(X, train))
# pred_mode = predict_median(mach, selectrows(X,train))
mean(abs.(pred_mean - selectrows(Y, train)))
q_20 = quantile.(pred, 0.20)
q_80 = quantile.(pred, 0.80)
report(mach)
############################
# Added in response to #92 #
############################
# tests that `update` handles data correctly in the case of a cold
# restart:
X = MLJBase.table(rand(5, 2))
y = rand(5)
model = EvoTreeRegressor()
data = MLJBase.reformat(model, X, y);
f, c, r = MLJBase.fit(model, 2, data...);
model.lambda = 0.1
MLJBase.update(model, 2, f, c, data...);
############################
# Feature Importances
############################
# Test feature importances are defined
for model ∈ [
EvoTreeClassifier(),
EvoTreeCount(),
EvoTreeRegressor(),
EvoTreeMLE(),
EvoTreeGaussian(),
]
@test reports_feature_importances(model) == true
end
# Test that feature importances work for Classifier
X, y = MLJBase.make_blobs(100, 3)
model = EvoTreeClassifier()
m = machine(model, X, y)
fit!(m)
rpt = MLJBase.report(m)
fi = MLJBase.feature_importances(model, m.fitresult, rpt)
@test size(fi, 1) == 3
X, y = MLJBase.make_regression(100, 3)
model = EvoTreeRegressor()
m = machine(model, X, y)
fit!(m)
rpt = MLJBase.report(m)
fi = MLJBase.feature_importances(model, m.fitresult, rpt)
@test size(fi, 1) == 3
##################################################
### Test with weights
##################################################
features = rand(1_000, 10)
X = features
Y = rand(size(X, 1))
W = rand(size(X, 1)) .+ 0.1
𝑖 = collect(1:size(X, 1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
x_train, x_eval = X[𝑖_train, :], X[𝑖_eval, :]
y_train, y_eval = Y[𝑖_train], Y[𝑖_eval]
w_train, w_eval = W[𝑖_train], W[𝑖_eval]
# @load EvoTreeRegressor
tree_model = EvoTreeRegressor(
loss=:logloss,
nrounds=10,
lambda=1.0,
gamma=0.0,
eta=0.1,
max_depth=6,
min_weight=32.0,
rowsample=0.5,
colsample=0.5,
nbins=32,
)
X = MLJBase.table(X)
# typeof(X)
mach = machine(tree_model, X, Y, W)
train, test = partition(eachindex(Y), 0.8, shuffle=true); # 80:20 split
fit!(mach, rows=train, verbosity=1, force=true)
mach.model.nrounds += 10
fit!(mach, rows=train, verbosity=1)
report(mach)
@testset "MLJ - rowtables - EvoTreeRegressor" begin
X, y = make_regression(1000, 5)
X = Tables.rowtable(X)
booster = EvoTreeRegressor()
# smoke tests:
mach = machine(booster, X, y) |> fit!
fit!(mach)
predict(mach, X)
end
@testset "MLJ - matrix - EvoTreeRegressor" begin
X, y = make_regression(1000, 5)
X = Tables.matrix(X)
booster = EvoTreeRegressor()
# smoke tests:
mach = machine(booster, X, y) |> fit!
fit!(mach)
predict(mach, X)
end
##################################################
### issue #267: ordered target
##################################################
@testset "MLJ - supported ordered factor predictions" begin
X = (; x=rand(10))
y = coerce(rand("ab", 10), OrderedFactor)
model = EvoTreeClassifier()
mach = machine(model, X, y) |> fit!
yhat = predict(mach, X)
@test isordered(yhat)
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 13476 | using Statistics
using StatsBase: sample
using EvoTrees: sigmoid, logit
using EvoTrees: check_args, check_parameter
using Random: seed!
# prepare a dataset
seed!(123)
features = rand(1_000) .* 5
X = reshape(features, (size(features)[1], 1))
Y = sin.(features) .* 0.5 .+ 0.5
Y = logit(Y) + randn(size(Y))
Y = sigmoid(Y)
is = collect(1:size(X, 1))
# train-eval split
i_sample = sample(is, size(is, 1), replace=false)
train_size = 0.8
i_train = i_sample[1:floor(Int, train_size * size(is, 1))]
i_eval = i_sample[floor(Int, train_size * size(is, 1))+1:end]
x_train, x_eval = X[i_train, :], X[i_eval, :]
y_train, y_eval = Y[i_train], Y[i_eval]
@testset "EvoTreeRegressor - MSE" begin
# mse
params1 = EvoTreeRegressor(
loss=:mse,
nrounds=100,
nbins=16,
lambda=0.5,
gamma=0.1,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
model, cache = EvoTrees.init(params1, x_train, y_train)
preds_ini = EvoTrees.predict(model, x_eval)
mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:mse,
print_every_n=25
)
preds = EvoTrees.predict(model, x_eval)
mse_error = mean(abs.(preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "EvoTreeRegressor - logloss" begin
params1 = EvoTreeRegressor(
loss=:logloss,
nrounds=100,
lambda=0.5,
gamma=0.1,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
model, cache = EvoTrees.init(params1, x_train, y_train)
preds_ini = EvoTrees.predict(model, x_eval)
mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:logloss,
print_every_n=25
)
preds = EvoTrees.predict(model, x_eval)
mse_error = mean(abs.(preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "EvoTreeRegressor - Gamma" begin
params1 = EvoTreeRegressor(
loss=:gamma,
nrounds=100,
lambda=0.5,
gamma=0.1,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
model, cache = EvoTrees.init(params1, x_train, y_train)
preds_ini = EvoTrees.predict(model, x_eval)
mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:gamma,
print_every_n=25
)
preds = EvoTrees.predict(model, x_eval)
mse_error = mean(abs.(preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "EvoTreeRegressor - Tweedie" begin
params1 = EvoTreeRegressor(
loss=:tweedie,
nrounds=100,
lambda=0.5,
gamma=0.1,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
model, cache = EvoTrees.init(params1, x_train, y_train)
preds_ini = EvoTrees.predict(model, x_eval)
mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:tweedie,
print_every_n=25
)
preds = EvoTrees.predict(model, x_eval)
mse_error = mean(abs.(preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "EvoTreeRegressor - L1" begin
params1 = EvoTreeRegressor(
loss=:l1,
alpha=0.5,
nrounds=100,
nbins=16,
lambda=0.5,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
model, cache = EvoTrees.init(params1, x_train, y_train)
preds_ini = EvoTrees.predict(model, x_eval)
mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:mae,
print_every_n=25
)
preds = EvoTrees.predict(model, x_eval)
mse_error = mean(abs.(preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "EvoTreeRegressor - Quantile" begin
params1 = EvoTreeRegressor(
loss=:quantile,
alpha=0.5,
nrounds=100,
nbins=16,
lambda=0.5,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
model, cache = EvoTrees.init(params1, x_train, y_train)
preds_ini = EvoTrees.predict(model, x_eval)
mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:wmae,
print_every_n=25
)
preds = EvoTrees.predict(model, x_eval)
mse_error = mean(abs.(preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "EvoTreeCount - Count" begin
params1 = EvoTreeCount(
loss=:poisson,
nrounds=100,
lambda=0.5,
gamma=0.1,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
model, cache = EvoTrees.init(params1, x_train, y_train)
preds_ini = EvoTrees.predict(model, x_eval)
mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:poisson_deviance,
print_every_n=25
)
preds = EvoTrees.predict(model, x_eval)
mse_error = mean(abs.(preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "EvoTreeMLE - Gaussian" begin
params1 = EvoTreeMLE(
loss=:gaussian,
nrounds=100,
nbins=16,
lambda=0.0,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=10.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
model, cache = EvoTrees.init(params1, x_train, y_train)
preds_ini = EvoTrees.predict(model, x_eval)[:, 1]
mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:gaussian,
print_every_n=25
)
preds = EvoTrees.predict(model, x_eval)[:, 1]
mse_error = mean(abs.(preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "EvoTreeMLE - Logistic" begin
params1 = EvoTreeMLE(
loss=:logistic,
nrounds=100,
nbins=16,
lambda=0.0,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=10.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
model, cache = EvoTrees.init(params1, x_train, y_train)
preds_ini = EvoTrees.predict(model, x_eval)[:, 1]
mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric=:logistic_mle,
print_every_n=25
)
preds = EvoTrees.predict(model, x_eval)[:, 1]
mse_error = mean(abs.(preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "EvoTreeGaussian - Gaussian" begin
params1 = EvoTreeGaussian(
nrounds=100,
nbins=16,
lambda=0.0,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=10.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
model, cache = EvoTrees.init(params1, x_train, y_train)
preds_ini = EvoTrees.predict(model, x_eval)[:, 1]
mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
model = fit_evotree(
params1;
x_train,
y_train,
x_eval,
y_eval,
metric="gaussian_mle",
print_every_n=25
)
preds = EvoTrees.predict(model, x_eval)[:, 1]
mse_error = mean(abs.(preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "EvoTrees - Feature Importance" begin
params1 = EvoTreeRegressor(
loss=:mse,
nrounds=100,
nbins=16,
lambda=0.5,
gamma=0.1,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
model = fit_evotree(params1; x_train, y_train)
features_gain = EvoTrees.importance(model)
end
@testset "EvoTreeClassifier" begin
x_train = Array([
sin.(1:1000) rand(1000)
100 .* cos.(1:1000) rand(1000).+1
])
y_train = repeat(1:2; inner=1000)
rng = rand(UInt32)
params1 = EvoTreeClassifier(; nrounds=100, eta=0.3, rng)
model = fit_evotree(params1; x_train, y_train)
preds = EvoTrees.predict(model, x_train)[:, 1]
@test !any(isnan.(preds))
# Categorical array
y_train_cat = CategoricalArray(y_train; levels=1:2)
params1 = EvoTreeClassifier(; nrounds=100, eta=0.3, rng)
model_cat = fit_evotree(params1; x_train, y_train=y_train_cat)
preds_cat = EvoTrees.predict(model_cat, x_train)[:, 1]
@test preds_cat ≈ preds
# Categorical array with additional levels
y_train_cat = CategoricalArray(y_train; levels=1:3)
params1 = EvoTreeClassifier(; nrounds=100, eta=0.3, rng)
model_cat = fit_evotree(params1; x_train, y_train=y_train_cat)
preds_cat = EvoTrees.predict(model_cat, x_train)[:, 1]
@test preds_cat ≈ preds # the extra unused level should not change predictions
end
@testset "Parametric kwarg constructor" begin
@testset "_type2loss" begin
# utility that converts types into loss symbols for EvoTreeRegressor
@test EvoTrees._type2loss(EvoTrees.MSE) == :mse
@test EvoTrees._type2loss(EvoTrees.L1) == :l1
@test EvoTrees._type2loss(EvoTrees.LogLoss) == :logloss
@test EvoTrees._type2loss(EvoTrees.Gamma) == :gamma
@test EvoTrees._type2loss(EvoTrees.Tweedie) == :tweedie
@test EvoTrees._type2loss(EvoTrees.Quantile) == :quantile
end
# check if we retain the parametric information properly
for EvoParamType in [
EvoTreeRegressor{EvoTrees.MSE},
EvoTreeRegressor{EvoTrees.L1},
EvoTreeCount{EvoTrees.Poisson},
EvoTreeClassifier{EvoTrees.MLogLoss},
EvoTreeMLE{EvoTrees.LogisticMLE},
EvoTreeGaussian{EvoTrees.GaussianMLE}
]
config = EvoParamType(; max_depth=2)
@test config isa EvoParamType
@test config.max_depth == 2
end
end
@testset "check_args functionality" begin
# check_args should throw an exception if the parameters are invalid
@testset "check_parameter" begin
# Valid case tests
@test check_parameter(Float64, 1.5, 0.0, typemax(Float64), :lambda) == nothing
@test check_parameter(Int, 5, 1, typemax(Int), :nrounds) == nothing
@test check_parameter(Int, 1, 1, typemax(Int), :nrounds) == nothing
@test check_parameter(Int, 1, 1, 1, :nrounds) == nothing
# Invalid type tests
@test_throws ErrorException check_parameter(Int, 1.5, 0, typemax(Int), :nrounds)
@test_throws ErrorException check_parameter(Float64, "1.5", 0.0, typemax(Float64), :lambda)
# Out of range tests
@test_throws ErrorException check_parameter(Int, -5, 0, typemax(Int), :nrounds)
@test_throws ErrorException check_parameter(Float64, -0.1, 0.0, typemax(Float64), :lambda)
@test_throws ErrorException check_parameter(Int, typemax(Int64), 0, typemax(Int) - 1, :nrounds)
@test_throws ErrorException check_parameter(Float64, typemax(Float64), 0.0, 10^6, :lambda)
end
# Check the implemented parameters on construction
@testset "check_args all for EvoTreeRegressor" begin
for (key, vals_to_test) in zip(
[:nrounds, :max_depth, :nbins, :lambda, :gamma, :min_weight, :alpha, :rowsample, :colsample, :eta],
[[-1, 1.5], [0, 1.5], [1, 256, 100.5], [-eps(Float64)], [-eps(Float64)], [-eps(Float64)],
[-0.1, 1.1], [0.0f0, 1.1f0], [0.0, 1.1], [-eps(Float64)]])
for val in vals_to_test
@test_throws Exception EvoTreeRegressor(; zip([key], [val])...)
end
end
end
# Test all EvoTypes that they have *some* checks in place
@testset "check_args EvoTypes" begin
for EvoTreeType in [EvoTreeMLE, EvoTreeGaussian, EvoTreeCount, EvoTreeClassifier, EvoTreeRegressor]
config = EvoTreeType(nbins=32)
# should not throw an exception
@test check_args(config) == nothing
# invalid nbins
config.nbins = 256
@test_throws Exception check_args(config)
end
end
end | EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 3239 | using CUDA
using Statistics
using StatsBase: sample
using EvoTrees: sigmoid, logit
using Random: seed!
# prepare a dataset
seed!(123)
features = rand(10_000) .* 5
X = reshape(features, (size(features)[1], 1))
Y = sin.(features) .* 0.5 .+ 0.5
Y = logit(Y) + randn(size(Y))
Y = sigmoid(Y)
is = collect(1:size(X, 1))
# train-eval split
i_sample = sample(is, size(is, 1), replace = false)
train_size = 0.8
i_train = i_sample[1:floor(Int, train_size * size(is, 1))]
i_eval = i_sample[floor(Int, train_size * size(is, 1))+1:end]
x_train, x_eval = X[i_train, :], X[i_eval, :]
y_train, y_eval = Y[i_train], Y[i_eval]
################################
# linear
################################
params1 = EvoTreeRegressor(
T = Float32,
loss = :linear,
metric = :none,
nrounds = 200,
nbins = 64,
lambda = 0.5,
gamma = 0.1,
eta = 0.1,
max_depth = 6,
min_weight = 1.0,
rowsample = 0.5,
colsample = 1.0,
)
@time model = fit_evotree_gpu(params1; x_train, y_train);
@time pred_train_linear = predict_gpu(model, x_train)
x_perm = sortperm(x_train[:, 1])
plot(
x_train,
y_train,
msize = 1,
mcolor = "gray",
mswidth = 0,
background_color = RGB(1, 1, 1),
seriestype = :scatter,
xaxis = ("feature"),
yaxis = ("target"),
legend = true,
label = "",
)
plot!(
x_train[:, 1][x_perm],
pred_train_linear[x_perm],
color = "navy",
linewidth = 1.5,
label = "Linear",
)
# savefig("figures/regression_sinus_gpu.png")
params1 = EvoTreeRegressor(
T = Float32,
loss = :linear,
metric = :mse,
nrounds = 200,
nbins = 64,
lambda = 0.5,
gamma = 0.1,
eta = 0.1,
max_depth = 6,
min_weight = 1.0,
rowsample = 0.5,
colsample = 1.0,
device = "gpu",
)
@time model = fit_evotree_gpu(params1; x_train, y_train, print_every_n = 25);
@time model = fit_evotree_gpu(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25);
@time pred_train_linear = predict_gpu(model, x_train)
################################
# Logistic
################################
params1 = EvoTreeRegressor(
T = Float32,
loss = :logistic,
metric = :logloss,
nrounds = 200,
nbins = 64,
lambda = 0.5,
gamma = 0.1,
eta = 0.1,
max_depth = 6,
min_weight = 1.0,
rowsample = 0.5,
colsample = 1.0,
device = "gpu",
)
@time model = fit_evotree_gpu(params1; x_train, y_train);
@time pred_train_linear = predict_gpu(model, x_train)
################################
# Gaussian
################################
params1 = EvoTreeGaussian(
T = Float64,
loss = :gaussian,
metric = :gaussian,
nrounds = 200,
nbins = 64,
lambda = 1.0,
gamma = 0.1,
eta = 0.1,
max_depth = 5,
min_weight = 100.0,
rowsample = 0.5,
colsample = 1.0,
rng = 123,
device = "gpu",
)
@time model = fit_evotree_gpu(params1; x_train, y_train, print_every_n = 25);
@time pred_train_gauss = predict_gpu(model, x_train)
pred_gauss = [
Distributions.Normal(pred_train_gauss[i, 1], pred_train_gauss[i, 2]) for
i in axes(pred_train_gauss, 1)
]
pred_q20 = quantile.(pred_gauss, 0.2)
pred_q80 = quantile.(pred_gauss, 0.8)
mean(y_train .< pred_q80)
mean(y_train .< pred_q20)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 331 | using JLD2, EvoTrees
data = JLD2.load("data/data.jld2")
x_train = data["X"]
y_train = data["y"]
config = EvoTrees.EvoTreeMLE(; nrounds=100, eta=0.05, min_weight=2)
model = fit_evotree(config, metric=:gaussian_mle, x_train=x_train, y_train=y_train, x_eval=x_train, y_eval=y_train, print_every_n=1)
EvoTrees.predict(model, x_train)
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 2941 | using Statistics
using StatsBase: sample
using EvoTrees: sigmoid, logit
using EvoTrees: check_args, check_parameter
using CategoricalArrays
using DataFrames
using Random: seed!
# prepare a dataset
seed!(123)
nobs = 1_000
x_num = rand(nobs) .* 5
lvls = ["a", "b", "c"]
x_cat = categorical(rand(lvls, nobs), levels=lvls, ordered=false)
x_bool = rand(Bool, nobs)
x_num_m1 = Vector{Union{Missing,Float64}}(copy(x_num))
x_num_m2 = Vector{Any}(copy(x_num))
lvls_m1 = ["a", "b", "c", missing]
x_cat_m1 = categorical(rand(lvls_m1, nobs), levels=lvls)
x_bool_m1 = Vector{Union{Missing,Bool}}(copy(x_bool))
# train-eval split
is = collect(1:nobs)
i_sample = sample(is, nobs, replace=false)
train_size = 0.8
i_train = i_sample[1:floor(Int, train_size * nobs)]
i_eval = i_sample[floor(Int, train_size * nobs)+1:end]
# target var
y_tot = sin.(x_num) .* 0.5 .+ 0.5
y_tot = logit(y_tot) + randn(nobs)
y_tot = sigmoid(y_tot)
target_name = "y"
y_tot_m1 = allowmissing(y_tot)
y_tot_m1[1] = missing
config = EvoTreeRegressor(
loss=:linear,
nrounds=100,
nbins=16,
lambda=0.5,
gamma=0.1,
eta=0.05,
max_depth=3,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
@testset "DataFrames - missing features" begin
df_tot = DataFrame(x_num=x_num, x_bool=x_bool, x_cat=x_cat, y=y_tot)
dtrain, deval = df_tot[i_train, :], df_tot[i_eval, :]
model = fit_evotree(
config,
dtrain;
target_name)
@test model.info[:fnames] == [:x_num, :x_bool, :x_cat]
# keep only fnames <= Real or Categorical
df_tot = DataFrame(x_num=x_num, x_num_m1=x_num_m1, x_num_m2=x_num_m2,
x_cat_m1=x_cat_m1, x_bool_m1=x_bool_m1, y=y_tot)
dtrain, deval = df_tot[i_train, :], df_tot[i_eval, :]
model = fit_evotree(
config,
dtrain;
target_name,
deval)
@test model.info[:fnames] == [:x_num]
model = fit_evotree(
config,
dtrain;
target_name,
fnames=[:x_num])
@test model.info[:fnames] == [:x_num]
# specifying features with missings should error
@test_throws AssertionError fit_evotree(
config,
dtrain;
deval,
fnames=[:x_num, :x_num_m1, :x_num_m2, :x_cat_m1, :x_bool_m1],
target_name)
end
@testset "DataFrames - missing in target errors" begin
df_tot = DataFrame(x_num=x_num, x_bool=x_bool, x_cat=x_cat, y=y_tot_m1)
dtrain, deval = df_tot[i_train, :], df_tot[i_eval, :]
@test_throws AssertionError fit_evotree(
config,
dtrain;
target_name)
end
@testset "Matrix - missing features" begin
x_tot = allowmissing(hcat(x_num_m1))
@test_throws AssertionError fit_evotree(
config;
x_train=x_tot,
y_train=y_tot)
x_tot = Matrix{Any}(hcat(x_num_m2))
@test_throws AssertionError fit_evotree(
config;
x_train=x_tot,
y_train=y_tot)
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 9511 | @testset "Monotonic Constraints" begin
using Statistics
using StatsBase: sample
using EvoTrees
using EvoTrees: sigmoid, logit
# prepare a dataset
features = rand(10_000) .* 2.5
X = reshape(features, (size(features)[1], 1))
Y = sin.(features) .* 0.5 .+ 0.5
Y = logit(Y) + randn(size(Y)) .* 0.2
Y = sigmoid(Y)
is = collect(1:size(X, 1))
seed = 123
# train-eval split
i_sample = sample(is, size(is, 1), replace=false)
train_size = 0.8
i_train = i_sample[1:floor(Int, train_size * size(is, 1))]
i_eval = i_sample[floor(Int, train_size * size(is, 1))+1:end]
x_train, x_eval = X[i_train, :], X[i_eval, :]
y_train, y_eval = Y[i_train], Y[i_eval]
######################################
### MSE - CPU
######################################
# benchmark
params1 = EvoTreeRegressor(
device="cpu",
loss=:mse,
nrounds=200,
nbins=32,
lambda=1.0,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=0.0,
rowsample=0.5,
colsample=1.0,
rng=seed,
)
model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:mse, print_every_n=25)
preds_ref = EvoTrees.predict(model, x_train);
# monotonic constraint
params1 = EvoTreeRegressor(
device="cpu",
loss=:mse,
nrounds=200,
nbins=32,
lambda=1.0,
gamma=0.0,
eta=0.5,
max_depth=6,
min_weight=0.0,
monotone_constraints=Dict(1 => 1),
rowsample=0.5,
colsample=1.0,
rng=seed,
)
model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:mse, print_every_n=25)
preds_mono = EvoTrees.predict(model, x_train);
# using Plots
# x_perm = sortperm(x_train[:, 1])
# plot(x_train, y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="")
# plot!(x_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference")
# plot!(x_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic")
######################################
### MSE - GPU
######################################
# benchmark
# params1 = EvoTreeRegressor(
# device="gpu",
# loss=:mse,
# nrounds=200, nbins=32,
# lambda=1.0, gamma=0.0, eta=0.05,
# max_depth=6, min_weight=0.0,
# rowsample=0.5, colsample=1.0, rng=seed)
# model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:mse, print_every_n=25);
# preds_ref = predict(model, x_train);
# # monotonic constraint
# params1 = EvoTreeRegressor(
# device="gpu",
# loss=:mse,
# nrounds=200, nbins=32,
# lambda=1.0, gamma=0.0, eta=0.5,
# max_depth=6, min_weight=0.0,
# monotone_constraints=Dict(1 => 1),
# rowsample=0.5, colsample=1.0, rng=seed)
# model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:mse, print_every_n=25);
# preds_mono = predict(model, x_train);
# using Plots
# x_perm = sortperm(x_train[:, 1])
# plot(x_train, y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="")
# plot!(x_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference - GPU")
# plot!(x_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic - GPU")
######################################
### Logloss - CPU
######################################
# benchmark
params1 = EvoTreeRegressor(
device="cpu",
loss=:logloss,
nrounds=200,
nbins=32,
lambda=0.05,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=0.0,
rowsample=0.5,
colsample=1.0,
rng=seed,
)
model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:logloss, print_every_n=25)
preds_ref = predict(model, x_train)
# monotonic constraint
params1 = EvoTreeRegressor(
device="cpu",
loss=:logloss,
nrounds=200,
nbins=32,
lambda=0.05,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=0.0,
monotone_constraints=Dict(1 => 1),
rowsample=0.5,
colsample=1.0,
rng=seed,
)
model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:logloss, print_every_n=25)
preds_mono = predict(model, x_train)
# x_perm = sortperm(x_train[:, 1])
# plot(x_train, y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="")
# plot!(x_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference")
# plot!(x_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic")
######################################
### LogLoss - GPU
######################################
# benchmark
# params1 = EvoTreeRegressor(
# device="gpu",
# loss=:logloss, metric=:logloss,
# nrounds=200, nbins=32,
# lambda=0.05, gamma=0.0, eta=0.05,
# max_depth=6, min_weight=0.0,
# rowsample=0.5, colsample=1.0, rng=seed)
# model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25);
# preds_ref = EvoTrees.predict(model, x_train);
# # monotonic constraint
# params1 = EvoTreeRegressor(
# device="gpu",
# loss=:logloss, metric=:logloss,
# nrounds=200, nbins=32,
# lambda=0.05, gamma=0.0, eta=0.05,
# max_depth=6, min_weight=0.0,
# monotone_constraints=Dict(1 => 1),
# rowsample=0.5, colsample=1.0, rng=seed)
# model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25);
# preds_mono = EvoTrees.predict(model, x_train);
# using Plots
# using Colors
# x_perm = sortperm(X_train[:, 1])
# plot(X_train, Y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="")
# plot!(X_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference")
# plot!(X_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic")
######################################
### Gaussian - CPU
######################################
# benchmark
params1 = EvoTreeGaussian(
device="cpu",
metric=:gaussian,
nrounds=200,
nbins=32,
lambda=1.0,
gamma=0.0,
eta=0.05,
max_depth=6,
min_weight=0.0,
rowsample=0.5,
colsample=1.0,
rng=seed,
)
model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:gaussian_mle, print_every_n=25)
preds_ref = predict(model, x_train)
# monotonic constraint
params1 = EvoTreeGaussian(
device="cpu",
metric=:gaussian,
nrounds=200,
nbins=32,
lambda=1.0,
gamma=0.0,
eta=0.5,
max_depth=6,
min_weight=0.0,
monotone_constraints=Dict(1 => 1),
rowsample=0.5,
colsample=1.0,
rng=seed,
)
model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:gaussian_mle, print_every_n=25)
preds_mono = EvoTrees.predict(model, x_train)
# x_perm = sortperm(x_train[:, 1])
# plot(x_train, y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="")
# plot!(x_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference")
# plot!(x_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic")
######################################
### Gaussian - GPU
######################################
# benchmark
# params1 = EvoTreeGaussian(
# device="gpu",
# metric=:gaussian,
# nrounds=200, nbins=32,
# lambda=1.0, gamma=0.0, eta=0.05,
# max_depth=6, min_weight=0.0,
# rowsample=0.5, colsample=1.0, rng=seed)
# model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25)
# preds_ref = EvoTrees.predict(model, x_train)
# # monotonic constraint
# params1 = EvoTreeGaussian(
# device="gpu",
# metric=:gaussian,
# nrounds=200, nbins=32,
# lambda=1.0, gamma=0.0, eta=0.5,
# max_depth=6, min_weight=0.0,
# monotone_constraints=Dict(1 => 1),
# rowsample=0.5, colsample=1.0, rng=seed)
# model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25)
# preds_mono = EvoTrees.predict(model, x_train)
# x_perm = sortperm(x_train[:, 1])
# plot(x_train, y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="GPU Gauss")
# plot!(x_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference")
# plot!(x_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic")
end | EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 3648 | using Statistics
using StatsBase: sample
using EvoTrees: sigmoid, logit
using EvoTrees: check_args, check_parameter
using Random: seed!
# prepare a dataset
seed!(123)
nobs = 2_000
features = rand(nobs) .* 5
X = reshape(features, (size(features)[1], 1))
Y = sin.(features) .* 0.5 .+ 0.5
Y = logit(Y) + randn(size(Y)) .* 0.1
Y = sigmoid(Y)
is = collect(1:size(X, 1))
# train-eval split
i_sample = sample(is, size(is, 1), replace=false)
train_size = 0.8
i_train = i_sample[1:floor(Int, train_size * size(is, 1))]
i_eval = i_sample[floor(Int, train_size * size(is, 1))+1:end]
x_train, x_eval = X[i_train, :], X[i_eval, :]
y_train, y_eval = Y[i_train], Y[i_eval]
Yc = (Y .> 0.8) .+ 1
y_train_c, y_eval_c = Yc[i_train], Yc[i_eval]
@testset "oblivious regressor" begin
@testset for loss in [:mse, :logloss, :quantile, :l1, :gamma, :tweedie]
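# the :l1 loss is evaluated with the :mae metric; every other loss has a same-named metric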
metric = loss == :l1 ? :mae : loss
config = EvoTreeRegressor(
loss=loss,
tree_type="oblivious",
nrounds=200,
nbins=32,
rng=123,
)
model, cache = EvoTrees.init(config, x_train, y_train)
preds_ini = model(x_eval)
mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
model = fit_evotree(
config;
x_train,
y_train,
x_eval,
y_eval,
metric=metric,
print_every_n=25
)
preds = model(x_eval)
mse_error = mean(abs.(preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
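# a negative gain means the trained model reduced MSE relative to the untrained initial model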
@test mse_gain_pct < -0.75
end
end
@testset "oblivious count" begin
config = EvoTreeCount(
tree_type="oblivious",
nrounds=200,
nbins=32,
rng=123,
)
model, cache = EvoTrees.init(config, x_train, y_train)
preds_ini = model(x_eval)
mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
model = fit_evotree(
config;
x_train,
y_train,
x_eval,
y_eval,
metric=:poisson,
print_every_n=25
)
preds = model(x_eval)
mse_error = mean(abs.(preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "oblivious MLE" begin
@testset for loss in [:gaussian_mle, :logistic_mle]
config = EvoTreeMLE(
loss=loss,
tree_type="oblivious",
nrounds=200,
nbins=32,
rng=123,
)
model, cache = EvoTrees.init(config, x_train, y_train)
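# MLE models predict one column per distribution parameter; column 1 holds the location (mean)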
preds_ini = model(x_eval)[:, 1]
mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
model = fit_evotree(
config;
x_train,
y_train,
x_eval,
y_eval,
metric=loss,
print_every_n=25
)
preds = model(x_eval)[:, 1]
mse_error = mean(abs.(preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
end
@testset "oblivious classifier" begin
config = EvoTreeClassifier(
tree_type="oblivious",
nrounds=200,
nbins=32,
rng=123,
)
model, cache = EvoTrees.init(config, x_train, y_train_c)
preds_ini = model(x_eval)
acc_ini = mean(map(argmax, eachrow(preds_ini)) .== y_eval_c)
model = fit_evotree(
config;
x_train,
y_train=y_train_c,
x_eval,
y_eval=y_eval_c,
metric=:mlogloss,
print_every_n=25
)
preds = model(x_eval)
acc = mean(map(argmax, eachrow(preds)) .== y_eval_c)
@test acc > 0.9
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 703 | using Random
using Plots
using Revise
using EvoTrees
using GeometryBasics  # provides the Point types used by BCurve below
# @load "blog/model_linear.bson" model
# @load "data/model_linear_8.bson" model
# @load "data/model_gaussian_5.bson" model
model = EvoTrees.load("data/model_linear_4.bson");
var_names = ["var_$i" for i = 1:100]
plot(model)
plot(model, 2)
plot(model, 3, var_names)
plot(model.trees[2])
plot(model.trees[2], var_names)
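# these calls rely on the Plots recipes EvoTrees defines for models and individual trees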
# `tree_layout` is not defined in this script; kept as commented-out scratch
# typeof(tree_layout[1])
# BezierCurve(tree_layout[1])
mutable struct BCurve{T<:GeometryBasics.Point}
control_points::Vector{T}
end
function (bc::BCurve)(t::Real)
p = zero(eltype(bc.control_points))  # accumulate in the element type of the control points
n = length(bc.control_points) - 1
for i = 0:n
p += bc.control_points[i+1] * binomial(n, i) * (1 - t)^(n - i) * t^i
end
p
end
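# A minimal usage sketch (assumed, not part of the original script): evaluate a
# quadratic Bézier curve at evenly spaced parameter values. `Point2` comes from GeometryBasics.
bc = BCurve([Point2(0.0, 0.0), Point2(0.5, 1.0), Point2(1.0, 0.0)])
pts = [bc(t) for t in 0:0.25:1]  # five points tracing the curve from (0, 0) to (1, 0)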
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 402 | using Statistics
using EvoTrees
using EvoTrees: predict
using CategoricalArrays
using Tables
using Random
using Test
@testset "EvoTrees" begin
@testset "Internal API" begin
include("core.jl")
include("oblivious.jl")
include("tables.jl")
include("monotonic.jl")
include("missings.jl")
end
@testset "MLJ" begin
include("MLJ.jl")
end
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 1983 | using Statistics
using StatsBase: sample, quantile
using Distributions
using Random
using EvoTrees
using EvoTrees: sigmoid, logit
using Serialization
# prepare a dataset
Random.seed!(12)
features = rand(10_000) .* 5
X = reshape(features, (size(features)[1], 1))
Y = sin.(features) .* 0.5 .+ 0.5
Y = logit(Y) + randn(size(Y))
Y = sigmoid(Y)
𝑖 = collect(1:size(X, 1))
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
x_train, x_eval = X[𝑖_train, :], X[𝑖_eval, :]
y_train, y_eval = Y[𝑖_train], Y[𝑖_eval]
# linear
params1 = EvoTreeRegressor(
T = Float64,
loss = :linear,
metric = :mse,
nrounds = 200,
nbins = 64,
lambda = 0.1,
gamma = 0.1,
eta = 0.05,
max_depth = 6,
min_weight = 1.0,
rowsample = 0.5,
colsample = 1.0,
rng = 123,
)
m = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25);
p = m(x_eval)
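# the commented block below regenerates serialized artifacts and checks that models
# saved under earlier EvoTrees versions still deserialize and predict identically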
# serialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v182.dat"), m);
# serialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v182.dat"), p);
# m_172 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v172.dat"));
# p_172 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v172.dat"));
# pm_172 = m_172(x_eval)
# m_180 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v180.dat"));
# p_180 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v180.dat"));
# pm_180 = m_180(x_eval)
# m_182 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v182.dat"));
# p_182 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v182.dat"));
# pm_182 = m_182(x_eval)
# @assert all(p .== p_172)
# @assert all(p .== pm_172)
# @assert all(p .== p_180)
# @assert all(p .== pm_180)
# @assert all(p .== p_182)
# @assert all(p .== pm_182)
# @info "test successful! 🚀" | EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 4493 | using Statistics
using StatsBase: sample
using EvoTrees: sigmoid, logit
using EvoTrees: check_args, check_parameter
using CategoricalArrays
using DataFrames
using Random: seed!
# prepare a dataset
seed!(123)
nobs = 1_000
x_num = rand(nobs) .* 5
lvls = ["a", "b", "c"]
x_cat = categorical(rand(lvls, nobs), levels=lvls, ordered=false)
x_bool = rand(Bool, nobs)
# train-eval split
is = collect(1:nobs)
i_sample = sample(is, nobs, replace=false)
train_size = 0.8
i_train = i_sample[1:floor(Int, train_size * nobs)]
i_eval = i_sample[floor(Int, train_size * nobs)+1:end]
# target var
y_tot = sin.(x_num) .* 0.5 .+ 0.5
y_tot = logit(y_tot) + randn(nobs)
y_tot = sigmoid(y_tot)
target_name = "y"
config = EvoTreeRegressor(
loss=:linear,
nrounds=100,
nbins=16,
lambda=0.5,
gamma=0.1,
eta=0.05,
max_depth=6,
min_weight=1.0,
rowsample=0.5,
colsample=1.0,
rng=123,
)
@testset "Tables - NTuples" begin
dtrain = (x1=x_num[i_train], y=y_tot[i_train])
deval = (x1=x_num[i_eval], y=y_tot[i_eval])
y_train, y_eval = y_tot[i_train], y_tot[i_eval]
m, cache = EvoTrees.init(config, dtrain; target_name="y")
preds_ini = EvoTrees.predict(m, deval)
mse_error_ini = mean((preds_ini .- y_eval) .^ 2)
model = fit_evotree(
config,
dtrain;
target_name)
preds = EvoTrees.predict(model, deval)
mse_error = mean((preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
model = fit_evotree(
config,
dtrain;
target_name,
deval,
metric=:mse,
print_every_n=25)
preds = EvoTrees.predict(model, deval)
mse_error = mean((preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "Tables - DataFrames" begin
df_tot = DataFrame(x_num=x_num, y=y_tot)
dtrain, deval = df_tot[i_train, :], df_tot[i_eval, :]
y_train, y_eval = y_tot[i_train], y_tot[i_eval]
m, cache = EvoTrees.init(config, dtrain; target_name="y")
preds_ini = EvoTrees.predict(m, deval)
mse_error_ini = mean((preds_ini .- y_eval) .^ 2)
model = fit_evotree(
config,
dtrain;
target_name)
preds = EvoTrees.predict(model, deval)
mse_error = mean((preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
model = fit_evotree(
config,
dtrain;
target_name,
deval,
metric=:mse,
print_every_n=25)
preds = EvoTrees.predict(model, deval)
mse_error = mean((preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "Tables - num/bool/cat" begin
y_tot = sin.(x_num) .* 0.5 .+ 0.5
y_tot = logit(y_tot) .+ randn(nobs) .+ 1.0 .* (x_cat .== "b") .- 1.0 .* (x_cat .== "c") .+ 1.0 .* x_bool
y_tot = sigmoid(y_tot)
y_train, y_eval = y_tot[i_train], y_tot[i_eval]
df_tot = DataFrame(x_num=x_num, x_bool=x_bool, x_cat=x_cat, y=y_tot)
dtrain, deval = df_tot[i_train, :], df_tot[i_eval, :]
m, cache = EvoTrees.init(config, dtrain; target_name)
preds_ini = EvoTrees.predict(m, deval)
mse_error_ini = mean((preds_ini .- y_eval) .^ 2)
model = fit_evotree(
config,
dtrain;
target_name)
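# feature names default to all non-target columns of the table, in column order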
@test model.info[:fnames] == [:x_num, :x_bool, :x_cat]
preds = EvoTrees.predict(model, deval)
mse_error = mean((preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
@testset "Tables - bool/cat" begin
y_tot = sin.(x_num) .* 0.1 .+ 0.5
y_tot = logit(y_tot) .+ randn(nobs) .+ 2.0 .* (x_cat .== "b") .- 3.0 .* (x_cat .== "c") .+ 3.0 .* x_bool
y_tot = sigmoid(y_tot)
y_train, y_eval = y_tot[i_train], y_tot[i_eval]
df_tot = DataFrame(x_num=x_num, x_bool=x_bool, x_cat=x_cat, y=y_tot)
dtrain, deval = df_tot[i_train, :], df_tot[i_eval, :]
fnames = [:x_bool, :x_cat]
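# restrict training to an explicit subset of features via the fnames keyword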
m, cache = EvoTrees.init(config, dtrain; target_name, fnames)
preds_ini = EvoTrees.predict(m, deval)
mse_error_ini = mean((preds_ini .- y_eval) .^ 2)
model = fit_evotree(
config,
dtrain;
target_name,
fnames)
@test model.info[:fnames] == fnames
preds = EvoTrees.predict(model, deval)
mse_error = mean((preds .- y_eval) .^ 2)
mse_gain_pct = mse_error / mse_error_ini - 1
@test mse_gain_pct < -0.75
end
| EvoTrees | https://github.com/Evovest/EvoTrees.jl.git |
|
[
"Apache-2.0"
] | 0.16.7 | 92d1f78f95f4794bf29bd972dacfa37ea1fec9f4 | code | 3119 | using Statistics
using StatsBase: sample
using EvoTrees
using EvoTrees: predict, sigmoid, logit
# prepare a dataset
features = rand(10_000) .* 2
X = reshape(features, (size(features)[1], 1))
noise = exp.(randn(length(X)))
Y = 2 .+ 3 .* X .+ noise
W = noise
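# the multiplicative noise doubles as observation weights, so weighting is informative here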
𝑖 = collect(1:size(X, 1))
seed = 123
# train-eval split
𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false)
train_size = 0.8
𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
x_train, x_eval = X[𝑖_train, :], X[𝑖_eval, :]
y_train, y_eval = Y[𝑖_train], Y[𝑖_eval]
w_train = W[𝑖_train]
w_eval = W[𝑖_eval]
# linear - no weights
params1 = EvoTreeRegressor(T=Float32, device="gpu",
loss=:linear, metric=:mse,
nrounds=100, nbins=100,
lambda=0.0, gamma=0.1, eta=0.05,
max_depth=6, min_weight=0.0,
rowsample=0.5, colsample=1.0, rng=seed)
model, cache = EvoTrees.init_evotree(params1, x_train, y_train)
model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25);
preds_no_weight = predict(model, x_train)
# linear - weighted
params1 = EvoTreeRegressor(T=Float32, device="gpu",
loss=:linear, metric=:mse,
nrounds=100, nbins=100,
lambda=0.0, gamma=0.1, eta=0.05,
max_depth=6, min_weight=0.0,
rowsample=0.5, colsample=1.0, rng=seed)
model, cache = EvoTrees.init_evotree(params1, x_train, y_train, w_train)
model = fit_evotree(params1; x_train, y_train, w_train, x_eval, y_eval, print_every_n=25);
preds_weighted_1 = predict(model, x_train)
params1 = EvoTreeRegressor(T=Float32, device="gpu",
loss=:linear, metric=:mse,
nrounds=100, nbins=100,
lambda=0.0, gamma=0.1, eta=0.05,
max_depth=6, min_weight=0.0,
rowsample=0.5, colsample=1.0, rng=seed)
model, cache = EvoTrees.init_evotree(params1, x_train, y_train, w_train)
model = fit_evotree(params1; x_train, y_train, w_train, x_eval, y_eval, w_eval, print_every_n=25);
preds_weighted_2 = predict(model, x_train)
params1 = EvoTreeRegressor(T=Float32, device="gpu",
loss=:linear, metric=:mse,
nrounds=100, nbins=100,
lambda=0.0, gamma=0.1, eta=0.05,
max_depth=6, min_weight=0.0,
rowsample=0.5, colsample=1.0, rng=seed)
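# constant weights (5 for every row) should reproduce the unweighted fit up to numerical noise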
w_train_3 = ones(eltype(y_train), size(y_train)) .* 5
model, cache = EvoTrees.init_evotree(params1, x_train, y_train, w_train_3)
model = fit_evotree(params1; x_train, y_train, w_train=w_train_3, x_eval, y_eval, print_every_n=25);
preds_weighted_3 = predict(model, x_train)
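# compare unweighted vs constant-weight predictions: differences should be small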
sum(abs.(preds_no_weight .- preds_weighted_3))
cor(preds_no_weight, preds_weighted_3)
ϵ = vec(abs.(preds_no_weight .- preds_weighted_3))
minimum(ϵ)
maximum(ϵ)
mean(ϵ)
minimum(preds_no_weight)
maximum(preds_no_weight)
mean(preds_no_weight)
# using Plots
# # using Colors
# x_perm = sortperm(x_train[:,1])
# plot(x_train, y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="")
# plot!(x_train[:,1][x_perm], preds_no_weight[x_perm], color="navy", linewidth=1.5, label="No weights")
# plot!(x_train[:,1][x_perm], preds_weighted[x_perm], color="red", linewidth=1.5, label="Weighted")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.