licenses
sequencelengths
1
3
version
stringclasses
677 values
tree_hash
stringlengths
40
40
path
stringclasses
1 value
type
stringclasses
2 values
size
stringlengths
2
8
text
stringlengths
25
67.1M
package_name
stringlengths
2
41
repo
stringlengths
33
86
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
1830
using LinearAlgebra, OrdinaryDiffEq, Test f = (du, u, p, t) -> du .= u ./ t jac = (J, u, p, t) -> (J[1, 1] = 1 / t; J[2, 2] = 1 / t; J[1, 2] = 0; J[2, 1] = 0) jp_diag = Diagonal(zeros(2)) fun = ODEFunction(f; jac = jac, jac_prototype = jp_diag) prob = ODEProblem(fun, ones(2), (1.0, 10.0)) sol = solve(prob, Rosenbrock23()) @test sol.u[end] β‰ˆ [10.0, 10.0] @test length(sol) < 60 sol = solve(prob, Rosenbrock23(autodiff = false)) @test sol.u[end] β‰ˆ [10.0, 10.0] @test length(sol) < 60 jp = Tridiagonal(jp_diag) fun = ODEFunction(f; jac = jac, jac_prototype = jp) prob = ODEProblem(fun, ones(2), (1.0, 10.0)) sol = solve(prob, Rosenbrock23()) @test sol.u[end] β‰ˆ [10.0, 10.0] @test length(sol) < 60 sol = solve(prob, Rosenbrock23(autodiff = false)) @test sol.u[end] β‰ˆ [10.0, 10.0] @test length(sol) < 60 #= jp = SymTridiagonal(jp_diag) fun = ODEFunction(f; jac=jac, jac_prototype=jp) prob = ODEProblem(fun,ones(2),(1.0,10.0)) sol = solve(prob,Rosenbrock23()) @test sol[end] β‰ˆ [10.0,10.0] @test length(sol) < 60 =# # Don't test the autodiff=false version here because it's not as numerically stable, # so lack of optimizations would lead to unsymmetric which causes an error: # LoadError: ArgumentError: broadcasted assignment breaks symmetry between locations (1, 2) and (2, 1) @test_broken begin jp = Hermitian(jp_diag) fun = ODEFunction(f; jac = jac, jac_prototype = jp) prob = ODEProblem(fun, ones(2), (1.0, 10.0)) sol = solve(prob, Rosenbrock23(autodiff = false)) @test sol.u[end] β‰ˆ [10.0, 10.0] @test length(sol) < 60 end @test_broken begin jp = Symmetric(jp_diag) fun = ODEFunction(f; jac = jac, jac_prototype = jp) prob = ODEProblem(fun, ones(2), (1.0, 10.0)) sol = solve(prob, Rosenbrock23(autodiff = false)) @test sol.u[end] β‰ˆ [10.0, 10.0] @test length(sol) < 60 end
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
1634
using Distributed addprocs(2) println("There are $(nprocs()) processes") @everywhere begin using Pkg Pkg.activate("downstream") Pkg.develop(PackageSpec(path = joinpath(pwd(), ".."))) Pkg.instantiate() using OrdinaryDiffEq prob = ODEProblem((u, p, t) -> 1.01u, 0.5, (0.0, 1.0)) u0s = [rand() * prob.u0 for i in 1:2] function prob_func(prob, i, repeat) println("Running trajectory $i") ODEProblem(prob.f, u0s[i], prob.tspan) end end ensemble_prob = EnsembleProblem(prob, prob_func = prob_func) sim = solve(ensemble_prob, Tsit5(), EnsembleSplitThreads(), trajectories = 2) @everywhere function lorenz!(du, u, p, t) du[1] = 10.0 * (u[2] - u[1]) du[2] = u[1] * (28.0 - u[3]) - u[2] du[3] = u[1] * u[2] - (8 / 3) * u[3] end u0 = [1.0, 0.0, 0.0] tspan = (0.0, 100.0) p = [1, 2.0, 3] prob = ODEProblem(lorenz!, u0, tspan, p) @everywhere function prob_func(prob, i, repeat) prob = remake(prob, tspan = (rand(), 100.0), p = rand(3)) return prob end ensemble_prob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = true) println("Running EnsembleSerial()") @test length(solve(ensemble_prob, Tsit5(), EnsembleSerial(), trajectories = 100)) == 100 println("Running EnsembleThreads()") @test length(solve(ensemble_prob, Tsit5(), EnsembleThreads(), trajectories = 100)) == 100 println("Running EnsembleDistributed()") @test length(solve(ensemble_prob, Tsit5(), EnsembleDistributed(), trajectories = 100)) == 100 println("Running EnsembleSplitThreads()") @test length(solve(ensemble_prob, Tsit5(), EnsembleSplitThreads(), trajectories = 100)) == 100
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
847
using OrdinaryDiffEq ## https://github.com/SciML/DifferentialEquations.jl/issues/1013 mutable struct SomeObject position::Any velocity::Any trajectory::Any end object = SomeObject(0, 1, nothing) # Current dynamics don't involve the object for the sake of MWE, but they could. function dynamics(du, u, p, t) du[1] = u[2] du[2] = -u[2] end for i in 1:2 initial_state = [0, 0] tspan = (0.0, 5.0) prob = ODEProblem(dynamics, initial_state, tspan, object) sol = solve(prob, Tsit5()) object.trajectory = sol end # https://github.com/SciML/DiffEqBase.jl/issues/1003 f(u, p, t) = 1.01 * u u0 = 1 / 2 tspan = (0.0, 1.0) prob = ODEProblem(f, u0, tspan) sol = solve(prob, Tsit5(), reltol = 1e-8, abstol = 1e-8) prob2 = ODEProblem((du, u, p, t) -> du[1] = 1, [0.0], (0, 10), (; x = sol)) solve(prob2, Tsit5())
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
3541
using StochasticDiffEq, DiffEqBase, OrdinaryDiffEq using Test, Random, Statistics import SDEProblemLibrary: prob_sde_2Dlinear, prob_sde_additivesystem, prob_sde_lorenz import ODEProblemLibrary: prob_ode_linear prob = prob_sde_2Dlinear prob2 = EnsembleProblem(prob) sim = solve(prob2, SRIW1(), dt = 1 // 2^(3), trajectories = 10) @test sim.u[1] isa DiffEqBase.RODESolution @test sim[1, 2] isa Matrix @test sim[1, 2, 1] isa Float64 sim = solve(prob2, SRIW1(), EnsembleThreads(), dt = 1 // 2^(3), trajectories = 10) err_sim = DiffEqBase.calculate_ensemble_errors(sim; weak_dense_errors = true) @test length(sim) == 10 sim = solve(prob2, SRIW1(), EnsembleThreads(), dt = 1 // 2^(3), trajectories = 10, batch_size = 2) err_sim = DiffEqBase.calculate_ensemble_errors(sim; weak_dense_errors = true) @test length(sim) == 10 sim = solve(prob2, SRIW1(), EnsembleThreads(), dt = 1 // 2^(3), adaptive = false, trajectories = 10) err_sim = DiffEqBase.calculate_ensemble_errors(sim; weak_timeseries_errors = true) sim = solve(prob2, SRIW1(), EnsembleThreads(), dt = 1 // 2^(3), trajectories = 10) DiffEqBase.calculate_ensemble_errors(sim) @test length(sim) == 10 sim = solve(prob2, SRIW1(), EnsembleSplitThreads(), dt = 1 // 2^(3), trajectories = 10) DiffEqBase.calculate_ensemble_errors(sim) @test length(sim) == 10 sim = solve(prob2, SRIW1(), EnsembleSerial(), dt = 1 // 2^(3), trajectories = 10) DiffEqBase.calculate_ensemble_errors(sim) @test length(sim) == 10 prob = prob_sde_additivesystem prob2 = EnsembleProblem(prob) sim = solve(prob2, SRA1(), dt = 1 // 2^(3), trajectories = 10) DiffEqBase.calculate_ensemble_errors(sim) output_func = function (sol, i) last(last(sol))^2, false end prob2 = EnsembleProblem(prob, output_func = output_func) sim = solve(prob2, SRA1(), dt = 1 // 2^(3), trajectories = 10) prob = prob_sde_lorenz prob2 = EnsembleProblem(prob) sim = solve(prob2, SRIW1(), dt = 1 // 2^(3), trajectories = 10) output_func = function (sol, i) last(sol), false end prob = prob_ode_linear prob_func = function (prob, i, repeat) ODEProblem(prob.f, rand() * prob.u0, prob.tspan, 1.01) end Random.seed!(100) reduction = function (u, batch, I) u = append!(u, batch) u, ((var(u) / sqrt(last(I))) / mean(u) < 0.5) ? true : false end prob2 = EnsembleProblem(prob, prob_func = prob_func, output_func = output_func, reduction = reduction, u_init = Vector{Float64}(), safetycopy = false) sim = solve(prob2, Tsit5(), trajectories = 10000, batch_size = 20) @test sim.converged == true prob_func = function (prob, i, repeat) ODEProblem(prob.f, (1 + i / 100) * prob.u0, prob.tspan, 1.01) end reduction = function (u, batch, I) u = append!(u, batch) u, false end prob2 = EnsembleProblem(prob, prob_func = prob_func, output_func = output_func, reduction = reduction, u_init = Vector{Float64}()) sim = solve(prob2, Tsit5(), trajectories = 100, batch_size = 20) @test sim.converged == false reduction = function (u, batch, I) u + sum(batch), false end prob2 = EnsembleProblem(prob, prob_func = prob_func, output_func = output_func, reduction = reduction, u_init = 0.0) sim2 = solve(prob2, Tsit5(), trajectories = 100, batch_size = 20) @test sim2.converged == false @test mean(sim.u) β‰ˆ sim2.u / 100 struct SomeUserType end output_func = function (sol, i) (SomeUserType(), false) end prob2 = EnsembleProblem(prob, prob_func = prob_func, output_func = output_func) sim2 = solve(prob2, Tsit5(), trajectories = 2) @test sim2.converged && typeof(sim2.u) == Vector{SomeUserType}
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
2467
using OrdinaryDiffEq, ForwardDiff, Zygote, Test using SciMLSensitivity using Random function dt!(du, u, p, t) x, y = u Ξ±, Ξ², Ξ΄, Ξ³ = p du[1] = dx = Ξ± * x - Ξ² * x * y du[2] = dy = -Ξ΄ * y + Ξ³ * x * y end n_par = 3 Random.seed!(2) u0 = rand(2, n_par) u0[:, 1] = [1.0, 1.0] tspan = (0.0, 10.0) p = [2.2, 1.0, 2.0, 0.4] prob_ode = ODEProblem(dt!, u0[:, 1], tspan) function test_loss(p1, prob) function prob_func(prob, i, repeat) @show i remake(prob, u0 = u0[:, i]) end #define ensemble problem ensembleprob = EnsembleProblem(prob, prob_func = prob_func) u = Array(solve(ensembleprob, Tsit5(), EnsembleThreads(), trajectories = n_par, p = p1, sensealg = ForwardDiffSensitivity(), saveat = 0.1, dt = 0.001))[:, end, :] loss = sum(u) return loss end test_loss(p, prob_ode) @time gs = Zygote.gradient(p) do p test_loss(p, prob_ode) end @test gs[1] isa Vector ### https://github.com/SciML/DiffEqFlux.jl/issues/595 function fiip(du, u, p, t) du[1] = dx = p[1] * u[1] - p[2] * u[1] * u[2] du[2] = dy = -p[3] * u[2] + p[4] * u[1] * u[2] end p = [1.5, 1.0, 3.0, 1.0]; u0 = [1.0; 1.0]; prob = ODEProblem(fiip, u0, (0.0, 10.0), p) sol = solve(prob, Tsit5()) function sum_of_solution(x) _prob = remake(prob, u0 = x[1:2], p = x[3:end]) sum(solve(_prob, Tsit5(), saveat = 0.1)) end Zygote.gradient(sum_of_solution, [u0; p]) # Testing ensemble problem. Works with ForwardDiff. Does not work with Zygote. N = 3 eu0 = rand(N, 2) ep = rand(N, 4) ensemble_prob = EnsembleProblem(prob, prob_func = (prob, i, repeat) -> remake(prob, u0 = eu0[i, :], p = ep[i, :], saveat = 0.1)) esol = solve(ensemble_prob, Tsit5(), trajectories = N) cache = Ref{Any}() function sum_of_e_solution(p) ensemble_prob = EnsembleProblem(prob, prob_func = (prob, i, repeat) -> remake(prob, u0 = eu0[i, :], p = p[i, :], saveat = 0.1)) sol = solve(ensemble_prob, Tsit5(), EnsembleSerial(), trajectories = N, abstol = 1e-12, reltol = 1e-12) z = Array(sol.u[1]) cache[] = sol.u[1].t sum(z) # just test for the first solutions, gradients should be zero for others end sum_of_e_solution(ep) x = ForwardDiff.gradient(sum_of_e_solution, ep) y = Zygote.gradient(sum_of_e_solution, ep)[1] # Zygote second to test cache of forward pass @test x β‰ˆ y @test cache[] == 0:0.1:10.0 # test prob.kwargs is forwarded
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
3903
using StochasticDiffEq, DiffEqBase, OrdinaryDiffEq, DiffEqBase.EnsembleAnalysis using Test import SDEProblemLibrary: prob_sde_linear, prob_sde_2Dlinear prob = prob_sde_linear prob2 = EnsembleProblem(prob) sim = solve(prob2, SRIW1(), dt = 1 // 2^(3), trajectories = 10, adaptive = false) m = timestep_mean(sim, 3) m2, v = timestep_meanvar(sim, 3) med = timestep_median(sim, 3) quant = timestep_quantile(sim, 0.5, 3) @test quant β‰ˆ med @test m β‰ˆ m2 m3, m4, c = timestep_meancov(sim, 3, 3) @test m β‰ˆ m3 @test v β‰ˆ c m3, m4, c = timestep_meancor(sim, 3, 3) @test c β‰ˆ one(c) vecarr = timeseries_steps_mean(sim) vecarr = timeseries_steps_median(sim) vecarr = timeseries_steps_quantile(sim, 0.5) m_series, v_series = timeseries_steps_meanvar(sim) summ = EnsembleSummary(sim) m4, v4 = m_series.u[3], v_series.u[3] covar_mat = timeseries_steps_meancov(sim)[3, 3] @test m β‰ˆ m4 @test v β‰ˆ v4 @test m β‰ˆ covar_mat[1] @test m β‰ˆ covar_mat[2] @test v β‰ˆ covar_mat[3] @test (get_timestep(sim, 1)...,) == (get_timepoint(sim, 0.0)...,) @test (get_timestep(sim, 2)...,) == (get_timepoint(sim, 1 // 2^(3))...,) @test (get_timestep(sim, 3)...,) == (get_timepoint(sim, 1 // 2^(2))...,) sim = solve(prob2, SRIW1(), dt = 1 // 2^(3), trajectories = 10) m = timepoint_mean(sim, 0.5) med = timepoint_median(sim, 0.6) quant = timepoint_quantile(sim, 0.5, 0.6) @test quant β‰ˆ med m2, v = timepoint_meanvar(sim, 0.5) @test m β‰ˆ m2 m3, m4, c = timepoint_meancov(sim, 0.5, 0.5) @test m β‰ˆ m3 @test v β‰ˆ c m3, m4, c = timepoint_meancor(sim, 0.5, 0.5) @test c β‰ˆ one(c) m_series = timeseries_point_mean(sim, 0:(1 // 2^(3)):1) m_series = timeseries_point_median(sim, 0:(1 // 2^(3)):1) m_series = timeseries_point_quantile(sim, 0.5, 0:(1 // 2^(3)):1) m_series, v_series = timeseries_point_meanvar(sim, 0:(1 // 2^(3)):1) summ = EnsembleSummary(sim, 0:(1 // 2^(3)):1) m5, v5 = m_series.u[5], v_series.u[5] @test m β‰ˆ m5 @test v β‰ˆ v5 m6, m7, v6 = timeseries_point_meancov(sim, 0:(1 // 2^(3)):1, 0:(1 // 2^(3)):1)[5, 5] @test m β‰ˆ m6 @test m β‰ˆ m7 @test v β‰ˆ v6 prob = prob_sde_2Dlinear prob2 = EnsembleProblem(prob) sim = solve(prob2, SRIW1(), dt = 1 // 2^(3), trajectories = 10, adaptive = false) m = timestep_mean(sim, 3) m2, v = timestep_meanvar(sim, 3) med = timestep_median(sim, 3) quant = timestep_quantile(sim, 0.5, 3) @test quant β‰ˆ med @test m β‰ˆ m2 m3, m4, c = timestep_meancov(sim, 3, 3) @test m β‰ˆ m3 @test v β‰ˆ c m3, m4, c = timestep_meancor(sim, 3, 3) @test c β‰ˆ ones(size(c)...) vecarr = timeseries_steps_mean(sim) vecarr = timeseries_steps_median(sim) vecarr = timeseries_steps_quantile(sim, 0.5) m_series, v_series = timeseries_steps_meanvar(sim) summ = EnsembleSummary(sim) m4, v4 = m_series.u[3], v_series.u[3] covar_mat = timeseries_steps_meancov(sim)[3, 3] @test m β‰ˆ m4 @test v β‰ˆ v4 @test m β‰ˆ covar_mat[1] @test m β‰ˆ covar_mat[2] @test v β‰ˆ covar_mat[3] @test (get_timestep(sim, 1)...,) == (get_timepoint(sim, 0.0)...,) @test (get_timestep(sim, 2)...,) == (get_timepoint(sim, 1 // 2^(3))...,) @test (get_timestep(sim, 3)...,) == (get_timepoint(sim, 1 // 2^(2))...,) sim = solve(prob2, SRIW1(), dt = 1 // 2^(3), trajectories = 10) m = timepoint_mean(sim, 0.5) med = timepoint_median(sim, 0.6) quant = timepoint_quantile(sim, 0.5, 0.6) @test quant β‰ˆ med m2, v = timepoint_meanvar(sim, 0.5) @test m β‰ˆ m2 m3, m4, c = timepoint_meancov(sim, 0.5, 0.5) @test m β‰ˆ m3 @test v β‰ˆ c m3, m4, c = timepoint_meancor(sim, 0.5, 0.5) @test c β‰ˆ ones(size(c)...) m_series = timeseries_point_mean(sim, 0:(1 // 2^(3)):1) m_series = timeseries_point_median(sim, 0:(1 // 2^(3)):1) m_series = timeseries_point_quantile(sim, 0.5, 0:(1 // 2^(3)):1) m_series, v_series = timeseries_point_meanvar(sim, 0:(1 // 2^(3)):1) summ = EnsembleSummary(sim, 0:(1 // 2^(3)):1) m5, v5 = m_series.u[5], v_series.u[5] @test m β‰ˆ m5 @test v β‰ˆ v5 m6, m7, v6 = timeseries_point_meancov(sim, 0:(1 // 2^(3)):1, 0:(1 // 2^(3)):1)[5, 5] @test m β‰ˆ m6 @test m β‰ˆ m7 @test v β‰ˆ v6
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
733
using OrdinaryDiffEq function f(du, u, p, t) du[1] = 1.01 * u[1] end u0 = [0.0, 0.0] tspan = (0.0, 1.0) prob = ODEProblem(f, u0, tspan) n = 100 initial_conditions = range(0, stop = 1, length = n) function prob_func(prob, i, repeat) prob.u0[1] = initial_conditions[i] prob end ensemble_prob = EnsembleProblem(prob, prob_func = prob_func) sim_1 = solve(ensemble_prob, Tsit5(), EnsembleThreads(), trajectories = 100) sim_2 = solve(ensemble_prob, Tsit5(), EnsembleDistributed(), trajectories = 100) ss_sol_1 = hcat(collect(EnsembleAnalysis.get_timepoint(sim_1, 0))...); ss_sol_2 = hcat(collect(EnsembleAnalysis.get_timepoint(sim_2, 0))...); ss_sol_1[1, :] == initial_conditions ss_sol_2[1, :] == initial_conditions
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
632
using Enzyme, EnzymeTestUtils using DiffEqBase: fastlog2, fastpow using Test @testset "Fast pow - Enzyme forward rule" begin @testset for RT in (Duplicated, DuplicatedNoNeed), Tx in (Const, Duplicated), Ty in (Const, Duplicated) x = 3.0 y = 2.0 test_forward(fastpow, RT, (x, Tx), (y, Ty), atol = 0.005, rtol = 0.005) end end @testset "Fast pow - Enzyme reverse rule" begin @testset for RT in (Active,), Tx in (Active,), Ty in (Active,) x = 2.0 y = 3.0 test_reverse(fastpow, RT, (x, Tx), (y, Ty), atol = 0.001, rtol = 0.001) end end
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
3630
using OrdinaryDiffEq, Test function lorenz(du, u, p, t) du[1] = 10.0(u[2] - u[1]) du[2] = u[1] * (28.0 - u[3]) - u[2] du[3] = u[1] * u[2] - (8 / 3) * u[3] end u0 = [1.0; 0.0; 0.0] tspan = (0.0, 1.0) prob = ODEProblem(lorenz, u0, tspan) sol = solve(prob, Tsit5(), save_idxs = 1) @inferred solve(prob, Tsit5()) @inferred solve(prob, Tsit5(), save_idxs = 1) @test_broken @inferred(remake(prob, u0 = Float32[1.0; 0.0; 0.0])) == remake(prob, u0 = Float32[1.0; 0.0; 0.0]) @test_broken @inferred(solve(prob, Tsit5(), u0 = Float32[1.0; 0.0; 0.0])) == solve(prob, Tsit5(), u0 = Float32[1.0; 0.0; 0.0]) prob = ODEProblem{true, SciMLBase.FullSpecialize}(lorenz, u0, tspan) @inferred SciMLBase.wrapfun_iip(prob.f) @inferred remake(prob, u0 = [1.0; 0.0; 0.0]) @inferred remake(prob, u0 = Float32[1.0; 0.0; 0.0]) @test_broken @inferred(solve(prob, Tsit5(), u0 = Float32[1.0; 0.0; 0.0])) == solve(prob, Tsit5(), u0 = Float32[1.0; 0.0; 0.0]) prob = ODEProblem(lorenz, Float32[1.0; 0.0; 0.0], tspan) @inferred solve(prob, Tsit5(), save_idxs = 1) @test_broken @inferred(solve(prob, Tsit5(), u0 = [1.0; 0.0; 0.0])) == solve(prob, Tsit5(), u0 = [1.0; 0.0; 0.0]) remake(prob, u0 = [1.0; 0.0; 0.0]) @inferred SciMLBase.wrapfun_iip(prob.f) @test_broken @inferred(ODEFunction{ isinplace(prob), SciMLBase.FunctionWrapperSpecialize}(prob.f)) == ODEFunction{isinplace(prob), SciMLBase.FunctionWrapperSpecialize}(prob.f) @inferred remake(prob, u0 = [1.0; 0.0; 0.0]) @test_broken @inferred(solve(prob, Tsit5(), u0 = [1.0; 0.0; 0.0])) == solve(prob, Tsit5(), u0 = [1.0; 0.0; 0.0]) function f(du, u, p, t) du[1] = p.a du[2] = p.b end const alg = Tsit5() function solve_ode(f::F, p::P, ensemblealg; kwargs...) where {F, P} tspan = (0.0, 1.0) Ξ”t = tspan[2] - tspan[1] dt = 1 / 252 nodes = Int(ceil(Ξ”t / dt) + 1) t = T = [tspan[1] + (i - 1) * dt for i in 1:nodes] # if I do not set {true}, prob type Any... prob = ODEProblem{true}(f, [0.0, 0.0], tspan, p) # prob = ODEProblem(f, [0., 0.], tspan, p) prob_func = (prob, i, repeat) -> begin remake(prob, tspan = (T[i + 1], t[1])) end # ensemble problem odes = EnsembleProblem(prob, prob_func = prob_func) sol = OrdinaryDiffEq.solve(odes, OrdinaryDiffEq.Tsit5(), ensemblealg, trajectories = nodes - 1, saveat = -dt; kwargs...) return sol end @inferred solve_ode(f, (a = 1, b = 1), EnsembleSerial()) @inferred solve_ode(f, (a = 1, b = 1), EnsembleThreads()) @test_broken @inferred(solve_ode(f, (a = 1, b = 1), EnsembleDistributed())) == solve_ode(f, (a = 1, b = 1), EnsembleDistributed()) @test_broken @inferred(solve_ode(f, (a = 1, b = 1), EnsembleSplitThreads())) == solve_ode(f, (a = 1, b = 1), EnsembleSplitThreads()) @inferred solve_ode(f, (a = 1, b = 1), EnsembleSerial(), save_idxs = 1) @inferred solve_ode(f, (a = 1, b = 1), EnsembleThreads(), save_idxs = 1) @test_broken @inferred(solve_ode( f, (a = 1, b = 1), EnsembleDistributed(), save_idxs = 1)) == solve_ode(f, (a = 1, b = 1), EnsembleDistributed(), save_idxs = 1) @test_broken @inferred(solve_ode( f, (a = 1, b = 1), EnsembleSplitThreads(), save_idxs = 1)) == solve_ode(f, (a = 1, b = 1), EnsembleSplitThreads(), save_idxs = 1) using StochasticDiffEq, Test u0 = 1 / 2 ff(u, p, t) = u gg(u, p, t) = u dt = 1 // 2^(4) tspan = (0.0, 1.0) prob = SDEProblem(ff, gg, u0, (0.0, 1.0)) sol = solve(prob, EM(), dt = dt) @inferred solve(prob, EM(), dt = dt) @inferred solve(prob, EM(), dt = dt, save_idxs = 1)
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
790
using OrdinaryDiffEq, Test function lorenz(du, u, p, t) du[1] = 10.0(u[2] - u[1]) du[2] = u[1] * (28.0 - u[3]) - u[2] du[3] = u[1] * u[2] - (8 / 3) * u[3] end u0 = [1.0; 0.0; 0.0] tspan = (0.0, 100.0) prob = ODEProblem(lorenz, u0, tspan) @test_nowarn sol = solve(prob, Tsit5(), reltol = 1e-6) sol = solve(prob, Tsit5(), rel_tol = 1e-6, kwargshandle = DiffEqBase.KeywordArgWarn) @test_logs (:warn, DiffEqBase.KWARGWARN_MESSAGE) sol=solve( prob, Tsit5(), rel_tol = 1e-6, kwargshandle = DiffEqBase.KeywordArgWarn) @test_throws DiffEqBase.CommonKwargError sol=solve(prob, Tsit5(), rel_tol = 1e-6) prob = ODEProblem(lorenz, u0, tspan, test = 2.0, kwargshandle = DiffEqBase.KeywordArgWarn) @test_logs (:warn, DiffEqBase.KWARGWARN_MESSAGE) sol=solve(prob, Tsit5(), reltol = 1e-6)
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
810
using OrdinaryDiffEq using LabelledArrays function f(out, du, u, p, t) out.x = -0.04u.x + 1e4 * u.y * u.z - du.x out.y = +0.04u.x - 3e7 * u.y^2 - 1e4 * u.y * u.z - du.y out.z = u.x + u.y + u.z - 1.0 end uβ‚€ = LVector(x = 1.0, y = 0.0, z = 0.0) duβ‚€ = LVector(x = -0.04, y = 0.04, z = 0.0) tspan = (0.0, 100000.0) differential_vars = LVector(x = true, y = true, z = false) prob = DAEProblem(f, duβ‚€, uβ‚€, tspan, differential_vars = differential_vars) sol = solve(prob, DImplicitEuler()) function f1(du, u, p, t) du.x .= -1 .* u.x .* u.y .* p[1] du.y .= -1 .* u.y .* p[2] end const n = 4 u_0 = @LArray fill(1000.0, 2 * n) (x = (1:n), y = ((n + 1):(2 * n))) p = [0.1, 0.1] prob1 = ODEProblem(f1, u_0, (0, 100.0), p) sol = solve(prob1, Rodas5()); sol = solve(prob1, Rodas5(autodiff = false));
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
2354
using ModelingToolkit, OrdinaryDiffEq, SteadyStateDiffEq, Test using ForwardDiff @variables t x(t) y(t) eqs = [0 ~ x - y 0 ~ y - x] @named sys = ODESystem(eqs, t) sys = structural_simplify(sys) prob = ODEProblem(sys, Pair[], (0, 10.0)) sol = solve(prob, Tsit5()) @test sol[x] == [0.0, 0.0] @test sol[y] == [0.0, 0.0] for kwargs in [ Dict(:saveat => 0:0.1:1), Dict(:save_start => false), Dict(:save_end => false) ] sol = solve(prob, kwargs...) init_integ = init(prob, kwargs...) solve!(init_integ) step_integ = init(prob, kwargs...) step!(step_integ, prob.tspan[end] - prob.tspan[begin]) @test sol.u[end] == init_integ.u @test sol.t[end] == init_integ.t @test sol.u[end] == step_integ.u @test sol.t[end] == step_integ.t end @variables t x y eqs = [0 ~ x - y 0 ~ y - x] @named sys = NonlinearSystem(eqs, [x, y], []) sys = structural_simplify(sys) prob = NonlinearProblem(sys, []) sol = solve(prob, DynamicSS(Tsit5())) @test sol[x] == 0.0 @test sol[y] == 0.0 # https://github.com/SciML/NonlinearSolve.jl/issues/387 using NonlinearSolve function unsat(du, u, p) du[1] = 1 end unsat_f = NonlinearFunction(unsat; resid_prototype = zeros(1)) unsatprob = NonlinearLeastSquaresProblem(unsat_f, nothing) sol = solve(unsatprob) # Success @test sol.retcode == SciMLBase.ReturnCode.Failure @test sol.resid == [1.0] # Issue#2664 @testset "remake type promotion with empty initial conditions" begin @parameters P @variables t x(t) D = Differential(t) # numerical ODE: xβ€²(t) = P with x(0) = 0 sys_num = structural_simplify(ODESystem([D(x) ~ P], t, [x], [P]; name = :sys)) prob_num_uninit = ODEProblem(sys_num, [x => 0.0], (0.0, 1.0), [P => NaN]) # uninitialized problem x_at_1_num(P) = solve(remake(prob_num_uninit; p = [sys_num.P => P]), Tsit5())( 1.0, idxs = x) # analytical solution: x(t) = P*t sys_anal = structural_simplify(ODESystem([x ~ P * t], t, [x], [P]; name = :sys)) prob_anal_uninit = ODEProblem(sys_anal, [], (0.0, 1.0), [P => NaN]) x_at_1_anal(P) = solve(remake(prob_anal_uninit; p = [sys_anal.P => P]), Tsit5())( 1.0, idxs = x) @test_nowarn x_at_1_num(1.0) @test_nowarn x_at_1_anal(1.0) @test_nowarn ForwardDiff.derivative(x_at_1_num, 1.0) @test_nowarn ForwardDiff.derivative(x_at_1_anal, 1.0) end
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
160
using OrdinaryDiffEq, Test f = (u, p, t) -> u * p[1] prob = ODEProblem(f, 1.01, (0.0, 1.0)) @test_throws SciMLBase.NullParameterIndexError solve(prob, Tsit5())
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
446
using OrdinaryDiffEq, Test function lorenz(du, u, p, t) du[1] = 10.0(u[2] - u[1]) du[2] = u[1] * (28.0 - u[3]) - u[2] du[3] = u[1] * u[2] - (8 / 3) * u[3] end u0 = [1.0; 0.0; 0.0] tspan = (0.0, 100.0) prob = ODEProblem(lorenz, u0, tspan, alg = Tsit5()) @test_nowarn sol = solve(prob, reltol = 1e-6) sol = solve(prob, reltol = 1e-6) @test sol.alg isa Tsit5 new_u0 = rand(3) sol = solve(prob, u0 = new_u0) @test sol.prob.u0 === new_u0
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
2529
using OrdinaryDiffEq, StochasticDiffEq, Test, Sundials f(u, p, t) = 2u u0 = 0.5 tspan = (0.0, 1.0) prob = ODEProblem(f, u0, tspan) sol = solve(prob, Tsit5()) function f(du, u, p, t) du .= 2.0 * u end prob = ODEProblem(f, u0, tspan) @test_throws DiffEqBase.IncompatibleInitialConditionError sol=solve(prob, Tsit5()) prob = ODEProblem{false}(f, u0, tspan) sol = solve(prob, Tsit5()) sol = solve(prob, nothing, alg = Tsit5()) sol = init(prob, nothing, alg = Tsit5()) prob = ODEProblem{false}(f, 1.0 + im, tspan) @test_throws DiffEqBase.ComplexSupportError solve(prob, CVODE_Adams()) @test_throws DiffEqBase.ProblemSolverPairingError solve(prob, DFBDF()) @test_throws DiffEqBase.NonSolverError solve(prob, 5.0) prob = ODEProblem{false}(f, u0, (nothing, nothing)) @test_throws DiffEqBase.NoTspanError solve(prob, Tsit5()) prob = ODEProblem{false}(f, u0, (NaN, 1.0)) @test_throws DiffEqBase.NaNTspanError solve(prob, Tsit5()) prob = ODEProblem{false}(f, u0, (1.0, NaN)) @test_throws DiffEqBase.NaNTspanError solve(prob, Tsit5()) prob = ODEProblem{false}(f, Any[1.0, 1.0f0], tspan) @test_throws DiffEqBase.NonConcreteEltypeError solve(prob, Tsit5()) prob = ODEProblem{false}(f, (1.0, 1.0f0), tspan) @test_throws DiffEqBase.TupleStateError solve(prob, Tsit5()) prob = ODEProblem{false}(f, u0, (0.0 + im, 1.0)) @test_throws DiffEqBase.ComplexTspanError solve(prob, Tsit5()) for u0 in ([0.0, 0.0], nothing) fmm = ODEFunction(f, mass_matrix = zeros(3, 3)) prob = ODEProblem(fmm, u0, (0.0, 1.0)) @test_throws DiffEqBase.IncompatibleMassMatrixError solve(prob, Tsit5()) end # Allow empty mass matrix for empty u0 fmm = ODEFunction((du, u, t) -> nothing, mass_matrix = zeros(0, 0)) prob = ODEProblem(fmm, nothing, (0.0, 1.0)) sol = solve(prob, Tsit5()) @test isa(sol, DiffEqBase.ODESolution) f(du, u, p, t) = du .= 1.01u function g(du, u, p, t) du[1, 1] = 0.3u[1] du[1, 2] = 0.6u[1] du[1, 3] = 0.9u[1] du[1, 4] = 0.12u[1] du[2, 1] = 1.2u[2] du[2, 2] = 0.2u[2] du[2, 3] = 0.3u[2] du[2, 4] = 1.8u[2] end prob = SDEProblem(f, g, randn(ComplexF64, 2), (0.0, 1.0), noise_rate_prototype = complex(zeros(2, 4)), noise = StochasticDiffEq.RealWienerProcess(0.0, zeros(3))) @test_throws DiffEqBase.NoiseSizeIncompatabilityError solve(prob, LambaEM()) function g!(du, u, p, t) du[1] .= u[1] + ones(3, 3) du[2] .= ones(3, 3) end u0 = [zeros(3, 3), zeros(3, 3)] prob = ODEProblem(g!, u0, (0, 1.0)) @test_throws DiffEqBase.NonNumberEltypeError solve(prob, Tsit5())
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
381
using OrdinaryDiffEq, ForwardDiff, StaticArrays, Test f(u, p, t) = copy(u) du1 = ForwardDiff.derivative(5.0) do x prob = ODEProblem(f, [x], (0.0, 1.0), nothing) sol = solve(prob, Tsit5()) sol.u[end] end du2 = ForwardDiff.derivative(5.0) do x prob = ODEProblem(f, SVector(x), (0.0, 1.0), nothing) sol = solve(prob, Tsit5()) sol.u[end] end @test du1 β‰ˆ du2
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
998
# ncondition tests using OrdinaryDiffEq, Test function f(u, p, t) return 5 * u end u0 = [1.0, 1.0] tspan = (0.0, 1.0) prob = ODEProblem(f, u0, tspan) x = Ref(0) condition = function (u, t, integrator) x[] += 1 return 0 end affect! = function (integrator) end cb = ContinuousCallback(condition, affect!) sol = solve(prob, Vern9(), callback = cb) @test x[] == sol.stats.ncondition condition = function (u, t, integrator) x[] += 1 return t - 0.46 end x[] = 0 cb = ContinuousCallback(condition, affect!) sol = solve(prob, Vern9(), callback = cb) @test x[] == sol.stats.ncondition condition = function (u, t, integrator) x[] += 1 return 1 end x[] = 0 cb = ContinuousCallback(condition, affect!) sol = solve(prob, Vern9(), callback = cb) @test x[] == sol.stats.ncondition condition = function (u, t, integrator) x[] += 1 return true end x[] = 0 cb = DiscreteCallback(condition, affect!) sol = solve(prob, Vern9(), callback = cb) @test x[] == sol.stats.ncondition
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
586
using OrdinaryDiffEq, DataFrames, Test, SymbolicIndexingInterface f_2dlinear = (du, u, p, t) -> du .= 1.01u; prob = ODEProblem(f_2dlinear, rand(2, 2), (0.0, 1.0)); sol1 = solve(prob, Euler(); dt = 1 // 2^(4)); df = DataFrame(sol1) @test names(df) == ["timestamp", "value1", "value2", "value3", "value4"] prob = ODEProblem( ODEFunction( f_2dlinear, sys = SymbolicIndexingInterface.SymbolCache([:a, :b, :c, :d], [], :t)), rand(2, 2), (0.0, 1.0)); sol2 = solve(prob, Euler(); dt = 1 // 2^(4)); df = DataFrame(sol2) @test names(df) == ["timestamp", "a", "b", "c", "d"]
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
197
using Unitful, OrdinaryDiffEq, Test f(du, u, p, t) = du .= 3 * u"1/s" * u prob = ODEProblem(f, [2.0u"m"], (0.0u"s", Inf * u"s")) intg = init(prob, Tsit5()) @test_nowarn step!(intg, 0.02u"s", true)
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
744
using OrdinaryDiffEq, Test my_f(u, p, t) = u my_f!(du, u, p, t) = du .= u ode = ODEProblem(my_f, [1.0], (0.0, 1.0)) integrator = init(ode, Tsit5()) @test SciMLBase.unwrapped_f(integrator.f.f) === my_f ode = ODEProblem(my_f!, [1.0], (0.0, 1.0)) integrator = init(ode, Tsit5()) @test SciMLBase.unwrapped_f(integrator.f.f) === my_f! using OrdinaryDiffEq, ForwardDiff, Measurements x = 1.0 Β± 0.0 f = (du, u, p, t) -> du .= u tspan = (0.0, 1.0) prob = ODEProblem(f, [x], tspan) # Should not error during problem construction but should be unwrapped integ = init(prob, Tsit5(), dt = 0.1) @test integ.f.f === f # Handle functional initial conditions prob = ODEProblem((dx, x, p, t) -> (dx .= 0), (p, t) -> zeros(2), (0, 10)) solve(prob, TRBDF2())
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
2334
using OrdinaryDiffEq, CUDA, LinearAlgebra, Test, StaticArrays function f(u, p, t) A * u end function f(du, u, p, t) mul!(du, A, u) end function jac(J, u, p, t) J .= A end function jac(u, p, t) A end function tgrad(du, u, p, t) du .= 0 end function tgrad(u, p, t) zero(u) end ff = ODEFunction(f, jac = jac, tgrad = tgrad) CUDA.allowscalar(false) A = cu(-rand(3, 3)) u0 = cu([1.0; 0.0; 0.0]) tspan = (0.0f0, 100.0f0) prob = ODEProblem(ff, u0, tspan) sol = solve(prob, Tsit5()) @test solve(prob, Rosenbrock23()).retcode == ReturnCode.Success solve(prob, Rosenbrock23(autodiff = false)); prob_oop = ODEProblem{false}(ff, u0, tspan) CUDA.allowscalar(false) sol = solve(prob_oop, Tsit5()) @test solve(prob_oop, Rosenbrock23()).retcode == ReturnCode.Success @test solve(prob_oop, Rosenbrock23(autodiff = false)).retcode == ReturnCode.Success prob_nojac = ODEProblem(f, u0, tspan) @test solve(prob_nojac, Rosenbrock23()).retcode == ReturnCode.Success @test solve(prob_nojac, Rosenbrock23(autodiff = false)).retcode == ReturnCode.Success @test solve(prob_nojac, Rosenbrock23(autodiff = false, diff_type = Val{:central})).retcode == ReturnCode.Success @test solve(prob_nojac, Rosenbrock23(autodiff = false, diff_type = Val{:complex})).retcode == ReturnCode.Success #= prob_nojac_oop = ODEProblem{false}(f,u0,tspan) DiffEqBase.prob2dtmin(prob_nojac_oop) @test_broken solve(prob_nojac_oop,Rosenbrock23()).retcode == ReturnCode.Success @test_broken solve(prob_nojac_oop,Rosenbrock23(autodiff=false)).retcode == ReturnCode.Success @test_broken solve(prob_nojac_oop,Rosenbrock23(autodiff=false,diff_type = Val{:central})).retcode == ReturnCode.Success @test_broken solve(prob_nojac_oop,Rosenbrock23(autodiff=false,diff_type = Val{:complex})).retcode == ReturnCode.Success =# # Complex Numbers Adaptivity DifferentialEquations.jl#460 f_complex(u, nothing, t) = 5.0f-1 .* u u0 = cu(rand(32, 32) .+ 1im * rand(32, 32)); prob = ODEProblem(f_complex, u0, (0.0f0, 1.0f0)) @test_nowarn sol = solve(prob, Tsit5()) # Calculating norm of Static Arrays in GPU kernel DiffEqBase.jl#864 function test_SA_norm(u::T) where {T <: AbstractArray} @cushow DiffEqBase.ODE_DEFAULT_NORM(u, 1.0) return nothing end u = @SVector rand(100) @testset "Static arrays norm on GPU" begin @cuda test_SA_norm(u) end
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
code
890
using BenchmarkTools, CUDA, DiffEqBase, Test, LinearAlgebra CUDA.allowscalar(false) du = cu(rand(4)) u = cu(rand(4)) uprev = cu(rand(4)) const TERMINATION_CONDITIONS = [ SteadyStateDiffEqTerminationMode(), SimpleNonlinearSolveTerminationMode(), NormTerminationMode(), RelTerminationMode(), RelNormTerminationMode(), AbsTerminationMode(), AbsNormTerminationMode(), RelSafeTerminationMode(), AbsSafeTerminationMode(), RelSafeBestTerminationMode(), AbsSafeBestTerminationMode() ] @testset "Termination Conditions: Allocations" begin @testset "Mode: $(tcond)" for tcond in TERMINATION_CONDITIONS for nfn in (Base.Fix1(maximum, abs), Base.Fix2(norm, 2), Base.Fix2(norm, Inf)) tcond = DiffEqBase.set_termination_mode_internalnorm(tcond, nfn) @test_nowarn DiffEqBase.check_convergence(tcond, du, u, uprev, 1e-3, 1e-3) end end end
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
6.157.0
8977ef8249b602e4cb46ddbaf3c51e6adc2958c7
docs
1087
# DiffEqBase.jl [![Join the chat at https://gitter.im/JuliaDiffEq/Lobby](https://badges.gitter.im/JuliaDiffEq/Lobby.svg)](https://gitter.im/JuliaDiffEq/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Build Status](https://github.com/SciML/DiffEqBase.jl/workflows/CI/badge.svg)](https://github.com/SciML/DiffEqBase.jl/actions?query=workflow%3ACI) [![Build status](https://badge.buildkite.com/99cbd352c336779c3117e4da61255a1ed8e7e7c084c3c2516c.svg)](https://buildkite.com/julialang/diffeqbase-dot-jl) DiffEqBase.jl is a component package in the DiffEq ecosystem. It holds the common types and utility functions which are shared by other solver packages to promote code reuse in the differential equation solver code. Users interested in using this functionality in full should check out [DifferentialEquations.jl](https://github.com/JuliaDiffEq/DifferentialEquations.jl) The documentation for the interfaces here can be found in [DiffEqDocs.jl](https://docs.sciml.ai/DiffEqDocs/dev/) and [DiffEqDevDocs.jl](https://docs.sciml.ai/DiffEqDevDocs/dev/).
DiffEqBase
https://github.com/SciML/DiffEqBase.jl.git
[ "MIT" ]
0.4.9
b15bc48d53bf910cb0dc59ec92d7c147eeda1de1
code
4702
module MemPool using Serialization, Sockets, Random import Serialization: serialize, deserialize export DRef, FileRef, poolset, poolget, mmwrite, mmread, cleanup import .Threads: ReentrantLock using ScopedValues using ConcurrentCollections ## Wrapping-unwrapping of payloads: struct MMWrap # wrap an object to use custom # memory-mapping/fast serializer x::Any end # Wrapping is an implementation detail unwrap_payload will be called by poolget # on every fetched value this will give back the value as it was saved. unwrap_payload(x) = x unwrap_payload(x::Tuple) = map(unwrap_payload, x) unwrap_payload(x::MMWrap) = unwrap_payload(x.x) function serialize(io::AbstractSerializer, w::MMWrap) # ticket to mm land mmwrite(io, w.x) end function deserialize(io::AbstractSerializer, T::Type{MMWrap}) MMWrap(mmread(T, io)) # gotta keep that wrapper on end # Unwrapping FileRef == reading the file. # This allows communicating only the file name across processors, # the receiver process will simply read from file while unwrapping struct FileRef file::String size::UInt end function FileRef(file; pid=nothing) size = try UInt(Base.stat(file).size) catch err @debug "Failed to query FileRef size of $file" UInt(0) end return FileRef(file, size) end unwrap_payload(f::FileRef) = unwrap_payload(open(deserialize, f.file, "r+")) approx_size(f::FileRef) = f.size include("io.jl") include("lock.jl") include("read_write_lock.jl") include("clock.jl") include("datastore.jl") """ approx_size(d) Returns the size of `d` in bytes used for accounting in MemPool datastore. """ function approx_size(d::T) where T if Base.datatype_pointerfree(T) return sizeof(d) else Base.summarysize(d) # note: this is accurate but expensive end end function approx_size(d::Union{Base.BitInteger, Float16, Float32, Float64}) sizeof(d) end function approx_size(d::AbstractDict{K,V}) where {K,V} N = length(d) ksz = approx_size(K, N, keys(d)) vsz = approx_size(V, N, values(d)) ksz + vsz + 8*N end function approx_size(d::AbstractArray{T}) where T isempty(d) && return 0 isbitstype(T) && return sizeof(d) approx_size(T, length(d), d) end function approx_size(T, L, d) fl = fixedlength(T) if fl > 0 return L * fl elseif T === Any return L * 64 # approximation (override with a more specific method where exact calculation is needed) elseif isempty(d) return 0 else return sum(approx_size(x) for x in d) end end function approx_size(xs::AbstractArray{String}) # doesn't check for redundant references, but # really super fast in comparison to summarysize s = 0 for x in xs s += sizeof(x) end s + 4 * length(xs) end function approx_size(s::String) sizeof(s)+sizeof(Int) # sizeof(Int) for 64 bit vs 32 bit systems end function approx_size(s::Symbol) sizeof(s)+sizeof(Int) end function __init__() SESSION[] = "sess-" * randstring(6) DISKCACHE_CONFIG[] = diskcache_config = DiskCacheConfig() setup_global_device!(diskcache_config) if haskey(ENV, "JULIA_MEMPOOL_MEMORY_RESERVED") MEM_RESERVED[] = parse(UInt, ENV["JULIA_MEMPOOL_MEMORY_RESERVED"]) end # Ensure we cleanup all references atexit(exit_hook) end function exit_hook() exit_flag[] = true evict_delay = DISKCACHE_CONFIG[].evict_delay kill_counter = evict_delay function datastore_empty(do_lock=true) with_lock(datastore_lock, do_lock) do all(ref->storage_read(ref).root isa CPURAMDevice, values(datastore)) end end # Wait for datastore objects to naturally expire GC.gc() yield() while kill_counter > 0 && !datastore_empty() GC.gc() sleep(1) kill_counter -= 1 end # Forcibly evict remaining objects with_lock(datastore_lock) do if !datastore_empty(false) @debug "Failed to cleanup datastore after $evict_delay seconds\nForcibly evicting all entries" for id in collect(keys(datastore)) state = MemPool.datastore[id] device = storage_read(state).root if device !== nothing @debug "Evicting ref $id with device $device" try delete_from_device!(device, state, id) catch end end delete!(MemPool.datastore, id) end end end if ispath(default_dir()) rm(default_dir(); recursive=true) end end precompile(exit_hook, ()) end # module
MemPool
https://github.com/JuliaData/MemPool.jl.git
[ "MIT" ]
0.4.9
b15bc48d53bf910cb0dc59ec92d7c147eeda1de1
code
1642
## # This file is a part of MemPool.jl. License is MIT # # Based upon https://github.com/google/benchmark, which is licensed under Apache v2: # https://github.com/google/benchmark/blob/master/LICENSE # # In compliance with the Apache v2 license, here are the original copyright notices: # Copyright 2015 Google Inc. All rights reserved. ## struct TimeSpec tv_sec::UInt64 # time_t tv_nsec::UInt64 end maketime(ts) = ts.tv_sec * UInt(1e9) + ts.tv_nsec # From bits/times.h on a Linux system # Check if those are the same on BSD if Sys.islinux() const CLOCK_MONOTONIC = Cint(1) const CLOCK_PROCESS_CPUTIME_ID = Cint(2) const CLOCK_THREAD_CPUTIME_ID = Cint(3) const CLOCK_MONOTONIC_COARSE = Cint(6) elseif Sys.isfreebsd() # atleast on FreeBSD 11.1 const CLOCK_MONOTONIC = Cint(4) const CLOCK_PROCESS_CPUTIME_ID = Cint(14) elseif Sys.isapple() # Version 10.12 required const CLOCK_MONOTONIC = Cint(6) const CLOCK_PROCESS_CPUTIME_ID = Cint(12) end @static if Sys.isunix() @inline function clock_gettime(cid) ts = Ref{TimeSpec}() ccall(:clock_gettime, Cint, (Cint, Ref{TimeSpec}), cid, ts) return ts[] end @inline function realtime() maketime(clock_gettime(CLOCK_MONOTONIC)) end @inline function cputime() maketime(clock_gettime(CLOCK_PROCESS_CPUTIME_ID)) end end @static if Sys.islinux() @inline function cputhreadtime() maketime(clock_gettime(CLOCK_THREAD_CPUTIME_ID)) end @inline function fasttime() maketime(clock_gettime(CLOCK_MONOTONIC_COARSE)) end else @inline cputhreadtime() = time_ns() # HACK @inline fasttime() = time_ns() # HACK end
MemPool
https://github.com/JuliaData/MemPool.jl.git
[ "MIT" ]
0.4.9
b15bc48d53bf910cb0dc59ec92d7c147eeda1de1
code
25068
using Distributed mutable struct DRef owner::Int id::Int size::UInt function DRef(owner, id, size; doref::Bool=true) d = new(owner, id, size) doref && poolref(d) finalizer(poolunref, d) d end end Base.:(==)(d1::DRef, d2::DRef) = (d1.owner == d2.owner) && (d1.id == d2.id) Base.hash(d::DRef, h::UInt) = hash(d.id, hash(d.owner, h)) const DRefID = Tuple{Int,Int} const DEBUG_REFCOUNTING = Ref(false) function Serialization.serialize(io::AbstractSerializer, d::DRef) Serialization.serialize_cycle_header(io, d) serialize(io, d.owner) serialize(io, d.id) serialize(io, d.size) _pooltransfer_send(io, d) end function _pooltransfer_send(io::Distributed.ClusterSerializer, d::DRef) pid = Distributed.worker_id_from_socket(io.io) if pid != -1 pooltransfer_send_local(d, pid) return end pid = Distributed.worker_id_from_socket(io) if pid != -1 pooltransfer_send_local(d, pid) return end @warn "Couldn't determine destination for DRef serialization\nRefcounting will be broken" end function _pooltransfer_send(io::AbstractSerializer, d::DRef) # We assume that we're not making copies of the serialized DRef # N.B. This is not guaranteed to be correct warn_dref_serdes() poolref(d) end function Serialization.deserialize(io::AbstractSerializer, dt::Type{DRef}) # Construct the object nf = fieldcount(dt) d = ccall(:jl_new_struct_uninit, Any, (Any,), dt) Serialization.deserialize_cycle(io, d) for i in 1:nf tag = Int32(read(io.io, UInt8)::UInt8) if tag != Serialization.UNDEFREF_TAG ccall(:jl_set_nth_field, Cvoid, (Any, Csize_t, Any), d, i-1, Serialization.handle_deserialize(io, tag)) end end _pooltransfer_recv(io, d) return d end function _pooltransfer_recv(io::Distributed.ClusterSerializer, d) # Add a new reference manually, and unref on finalization DEBUG_REFCOUNTING[] && _enqueue_work(Core.print, "<- (", d.owner, ", ", d.id, ") at ", myid(), "\n") poolref(d, true) finalizer(poolunref, d) end function _pooltransfer_recv(io::AbstractSerializer, d) # N.B. This is not guaranteed to be correct warn_dref_serdes() poolref(d) finalizer(poolunref, d) # Matches the poolref during serialization poolunref(d) end warn_dref_serdes() = @warn "Performing serialization of DRef with unknown serializer\nThis may fail or produce incorrect results" maxlog=1 # Ensure we call the DRef ctor Base.copy(d::DRef) = DRef(d.owner, d.id, d.size) function Base.deepcopy_internal(d::DRef, stackdict::IdDict) if haskey(stackdict, d) return stackdict[d] end return DRef(d.owner, d.id, d.size) end include("storage.jl") storage_read(state::RefState) = @atomic :acquire state.storage "Atomically replaces `state.storage` with the result of `f(state.storage)`." function storage_rcu!(f, state::RefState) while true orig_sstate = @atomic :acquire state.storage # Generate new state based on old state new_sstate = f(orig_sstate) if new_sstate === orig_sstate throw(ConcurrencyViolationError("Attempted to atomically replace StorageState with itself")) end succ = (@atomicreplace :acquire_release :acquire state.storage orig_sstate => new_sstate).success if succ return new_sstate end end end using .Threads # Global ID counter for generating local DRef IDs const id_counter = Atomic{Int}(0) # Lock for access to `datastore` const datastore_lock = NonReentrantLock() # Data store, maps local DRef ID to RefState, which holds the reference's values const datastore = Dict{Int,RefState}() struct RefCounters # Count of # of workers holding at least one reference worker_counter::Atomic{Int} # Count of # of local references local_counter::Atomic{Int} # Per other worker, count of # of sent references to other worker send_counters::Dict{Int,Int} # Per other worker, count of # of received references from other worker recv_counters::Dict{Int,Int} # Locks for access to send/recv counters tx_lock::NonReentrantLock end function RefCounters() rc = maybepop!(REFCOUNTERS_CACHE) if rc === nothing rc = RefCounters(Atomic{Int}(0), Atomic{Int}(0), Dict{Int,Int}(), Dict{Int,Int}(), NonReentrantLock()) else Threads.atomic_sub!(REFCOUNTERS_STORED, 1) rc = something(rc) end return rc end function refcounters_replace!(rc) if REFCOUNTERS_STORED[] < REFCOUNTERS_CACHE_MAX rc.worker_counter[] = 0 rc.local_counter[] = 0 empty!(rc.send_counters) empty!(rc.recv_counters) Threads.atomic_add!(REFCOUNTERS_STORED, 1) push!(REFCOUNTERS_CACHE, rc) end end const REFCOUNTERS_CACHE = ConcurrentStack{RefCounters}() const REFCOUNTERS_STORED = Threads.Atomic{Int}(0) const REFCOUNTERS_CACHE_MAX = 2^12 Base.show(io::IO, ctrs::RefCounters) = with_lock(ctrs.tx_lock) do print(io, string(ctrs)) end function Base.string(ctrs::RefCounters) "RefCounters(workers=" * string(ctrs.worker_counter[]) * ", local=" * string(ctrs.local_counter[]) * ", send=" * string(sum(values(ctrs.send_counters))) * ", recv=" * string(sum(values(ctrs.recv_counters))) * ")" end # Lock for access to `datastore_counters` const datastore_counters_lock = NonReentrantLock() # Per-DRef counters const datastore_counters = Dict{DRefID, RefCounters}() # Flag set when this session is exiting const exit_flag = Ref{Bool}(false) """ Updates `local_counter` by `adj`, and checks if the ref is no longer present on this worker. If so, all sent references are collected and sent to the owner. """ function update_and_check_local!(ctrs, owner, id, adj) if atomic_add!(ctrs.local_counter, adj) == 0 - adj transfers = nothing @safe_lock_spin ctrs.tx_lock begin DEBUG_REFCOUNTING[] && _enqueue_work(Core.print, "LL (", owner, ", ", id, ") at ", myid(), " with ", string(ctrs), "\n"; gc_context=true) transfers = copy(ctrs.send_counters) empty!(ctrs.send_counters) end if myid() == owner # N.B. Immediately update counters to prevent hidden counts in send queue poolunref_owner(id, transfers; gc_context=true) else # Tell the owner we hold no more references _enqueue_work(remotecall_wait, poolunref_owner, owner, id, transfers; gc_context=true) @safe_lock_spin datastore_counters_lock begin delete!(datastore_counters, (owner, id)) end end end end """ Updates `worker_counter` by `adj`, and checks if the ref can be freed. If it can be freed, it is immediately deleted from the datastore. """ function update_and_check_owner!(ctrs, id, adj) with_lock(ctrs.tx_lock) do tx_free = true for pid in keys(ctrs.recv_counters) if ctrs.recv_counters[pid] == 0 delete!(ctrs.recv_counters, pid) else tx_free = false break end end if atomic_add!(ctrs.worker_counter, adj) == 0 - adj DEBUG_REFCOUNTING[] && _enqueue_work(Core.print, "OO (", myid(), ", ", id, ") with ", string(ctrs), "\n"; gc_context=true) if tx_free datastore_delete(id) return true end return false end end end # HACK: Force remote GC messages to be executed serially mutable struct SendQueue queue::Channel{Any} @atomic task::Union{Task,Nothing} processing::Bool end const SEND_QUEUE = SendQueue(Channel(typemax(Int)), nothing, false) function _enqueue_work(f, args...; gc_context=false) if SEND_QUEUE.task === nothing task = Task() do while true try work, _args = take!(SEND_QUEUE.queue) SEND_QUEUE.processing = true work(_args...) SEND_QUEUE.processing = false catch err exit_flag[] && continue err isa ProcessExitedException && continue # TODO: Remove proc from counters iob = IOContext(IOBuffer(), :color=>true) println(iob, "Error in enqueued work:") Base.showerror(iob, err) seek(iob.io, 0) write(stderr, iob) end end end if @atomicreplace(SEND_QUEUE.task, nothing => task).success schedule(task) errormonitor(task) end end if gc_context while true GC.safepoint() if trylock(SEND_QUEUE.queue) try put!(SEND_QUEUE.queue, (f, args)) break finally unlock(SEND_QUEUE.queue) end end end else put!(SEND_QUEUE.queue, (f, args)) end end function poolref(d::DRef, recv=false) DEBUG_REFCOUNTING[] && _enqueue_work(Core.print, "^^ (", d.owner, ", ", d.id, ") at ", myid(), "\n") ctrs = with_lock(datastore_counters_lock) do # This might be a new DRef get!(RefCounters, datastore_counters, (d.owner, d.id)) end # Update the local refcount if atomic_add!(ctrs.local_counter, 1) == 0 # We've never seen this DRef, so tell the owner DEBUG_REFCOUNTING[] && _enqueue_work(Core.print, "!! (", d.owner, ", ", d.id, ") at ", myid(), "\n") if myid() == d.owner poolref_owner(d.id, ctrs) else _enqueue_work(remotecall_wait, poolref_owner, d.owner, d.id) end end # We've received this DRef via transfer if recv if myid() == d.owner pooltransfer_recv_owner(d.id, myid()) else _enqueue_work(remotecall_wait, pooltransfer_recv_owner, d.owner, d.id, myid()) end end end "Called on owner when a worker first holds a reference to DRef with ID `id`." function poolref_owner(id::Int, ctrs=nothing) free = false if ctrs === nothing ctrs = with_lock(()->datastore_counters[(myid(), id)], datastore_counters_lock) end update_and_check_owner!(ctrs, id, 1) DEBUG_REFCOUNTING[] && _enqueue_work(Core.print, "== (", myid(), ", ", id, ")\n") end function poolunref(d::DRef) DEBUG_REFCOUNTING[] && _enqueue_work(Core.print, "vv (", d.owner, ", ", d.id, ") at ", myid(), "\n"; gc_context=true) # N.B. Runs in a finalizer context, so yielding is disallowed ctrs = @safe_lock_spin datastore_counters_lock begin @assert haskey(datastore_counters, (d.owner,d.id)) "poolunref called before any poolref (on $(myid())): ($(d.owner),$(d.id))" datastore_counters[(d.owner, d.id)] end update_and_check_local!(ctrs, d.owner, d.id, -1) end "Called on owner when a worker no longer holds any references to DRef with ID `id`." function poolunref_owner(id::Int, transfers::Dict{Int,Int}; gc_context=false) xfers = sum(map(sum, values(transfers))) ctrs = if gc_context @safe_lock_spin datastore_counters_lock begin @assert haskey(datastore_counters, (myid(),id)) "poolunref_owner called before any poolref_owner: ($(myid()), $id)" datastore_counters[(myid(), id)] end else with_lock(datastore_counters_lock) do @assert haskey(datastore_counters, (myid(),id)) "poolunref_owner called before any poolref_owner: ($(myid()), $id)" datastore_counters[(myid(), id)] end end if gc_context @safe_lock_spin ctrs.tx_lock begin for pid in keys(transfers) old = get(ctrs.recv_counters, pid, 0) ctrs.recv_counters[pid] = old + transfers[pid] end end else with_lock(ctrs.tx_lock) do for pid in keys(transfers) old = get(ctrs.recv_counters, pid, 0) ctrs.recv_counters[pid] = old + transfers[pid] end end end DEBUG_REFCOUNTING[] && _enqueue_work(Core.print, "@@ (", myid(), ", ", id, ") with xfers ", xfers, " and ", string(ctrs), "\n"; gc_context) update_and_check_owner!(ctrs, id, -1) end function pooltransfer_send_local(d::DRef, to_pid::Int) DEBUG_REFCOUNTING[] && _enqueue_work(Core.print, "-> (", d.owner, ", ", d.id, ") to ", to_pid, "\n") ctrs = with_lock(()->datastore_counters[(d.owner, d.id)], datastore_counters_lock) with_lock(ctrs.tx_lock) do prev = get(ctrs.send_counters, to_pid, 0) ctrs.send_counters[to_pid] = prev + 1 end end function pooltransfer_recv_owner(id::Int, to_pid::Int) ctrs = with_lock(()->datastore_counters[(myid(), id)], datastore_counters_lock) with_lock(ctrs.tx_lock) do prev = get(ctrs.recv_counters, to_pid, 0) ctrs.recv_counters[to_pid] = prev - 1 end update_and_check_owner!(ctrs, id, 0) end isinmemory(state::RefState) = storage_read(state).data !== nothing isondisk(state::RefState) = any(l->l.handle !== nothing, storage_read(state).leaves) isinmemory(id::Int) = isinmemory(with_lock(()->datastore[id], datastore_lock)) isondisk(id::Int) = isondisk(with_lock(()->datastore[id], datastore_lock)) isinmemory(x::DRef) = isinmemory(x.id) isondisk(x::DRef) = isondisk(x.id) const MEM_RESERVED = Ref{UInt}(512 * (1024^2)) # Reserve 512MB of RAM for OS const MEM_RESERVE_LOCK = Threads.ReentrantLock() const MEM_RESERVE_SWEEPS = Ref{Int}(3) """ When called, ensures that at least `MEM_RESERVED[]` bytes are available to the OS. If there is not enough memory available, then a variety of calls to the GC are performed to free up memory until either the reservation limit is satisfied, or `max_sweeps` number of cycles have elapsed. """ function ensure_memory_reserved(size::Integer=0; max_sweeps::Integer=MEM_RESERVE_SWEEPS[]) sat_sub(x::T, y::T) where T = x < y ? zero(T) : x-y max_sweeps == 0 && return # Do a quick (cached) check, to optimize for many calls to this function when memory isn't tight if Int(storage_available(CPURAMResource())) >= MEM_RESERVED[] return end # Check whether the OS is running tight on memory sweep_ctr = 0 while true with(QUERY_MEM_OVERRIDE => true) do Int(storage_available(CPURAMResource())) < MEM_RESERVED[] end || break # We need more memory! Let's encourage the GC to clear some memory... sweep_start = fasttime() mem_used = with(QUERY_MEM_OVERRIDE => true) do storage_utilized(CPURAMResource()) end if sweep_ctr == 0 @debug "Not enough memory to continue! Sweeping up unused memory..." GC.gc(false) elseif sweep_ctr == 1 GC.gc(true) else @everywhere GC.gc(true) end # Let finalizers run yield() # Wait for send queue to clear while SEND_QUEUE.processing || !isempty(SEND_QUEUE.queue) yield() end with(QUERY_MEM_OVERRIDE => true) do mem_freed = sat_sub(mem_used, storage_utilized(CPURAMResource())) @debug "Freed $(Base.format_bytes(mem_freed)) bytes, available: $(Base.format_bytes(storage_available(CPURAMResource())))" end sweep_ctr += 1 if sweep_ctr >= max_sweeps @debug "Made too many sweeps, bailing out..." break end end if sweep_ctr > 0 @debug "Swept for $sweep_ctr cycles" end end function poolset(@nospecialize(x), pid=myid(); size=approx_size(x), retain=false, restore=false, device=GLOBAL_DEVICE[], leaf_device=initial_leaf_device(device), tag=nothing, leaf_tag=Tag(), destructor=nothing) if pid == myid() if !restore @lock MEM_RESERVE_LOCK ensure_memory_reserved(size) end id = atomic_add!(id_counter, 1) sstate = if !restore StorageState(Some{Any}(x), Vector{StorageLeaf}(), device) else @assert !isa(leaf_device, CPURAMDevice) "Cannot use `CPURAMDevice()` as leaf device when `restore=true`" StorageState(nothing, [StorageLeaf(leaf_device, Some{Any}(x), retain)], device) end notify(sstate) state = RefState(sstate, size; tag, leaf_tag, destructor) rc = RefCounters() Threads.atomic_add!(rc.local_counter, 1) Threads.atomic_add!(rc.worker_counter, 1) with_lock(datastore_counters_lock) do datastore_counters[(pid, id)] = rc end with_lock(datastore_lock) do datastore[id] = state end DEBUG_REFCOUNTING[] && _enqueue_work(Core.print, "++ (", myid(), ", ", id, ") [", x, "]\n") d = DRef(myid(), id, size; doref=false) if !(restore && device === leaf_device) try write_to_device!(device, state, id) catch # Revert the root switch to ensure we don't try to delete notify(storage_rcu!(state) do sstate StorageState(sstate; root=CPURAMDevice()) end) rethrow() end end if retain retain_on_device!(device, state, id, true; all=true) end return d else # use our serialization remotecall_fetch(pid, MMWrap(x)) do wx poolset(unwrap_payload(wx), pid) end end end function forwardkeyerror(f) try f() catch err if isa(err, RemoteException) && isa(err.captured.ex, KeyError) rethrow(err.captured.ex) end rethrow(err) end end function poolget(ref::DRef) DEBUG_REFCOUNTING[] && _enqueue_work(Core.print, "?? (", ref.owner, ", ", ref.id, ") at ", myid(), "\n") original_ref = ref # Check global redirect cache ref = lock_read(REDIRECT_CACHE_LOCK) do get(REDIRECT_CACHE, ref, ref) end # Fetch the value (or a RedirectTo) from the owner @label fetch value = if ref.owner == myid() _getlocal(ref.id, false) else forwardkeyerror() do remotecall_fetch(ref.owner, ref) do ref MMWrap(_getlocal(ref.id, true)) end end end |> unwrap_payload if value isa RedirectTo # Redirected to a new ref, update the cache and try again @lock REDIRECT_CACHE_LOCK begin REDIRECT_CACHE[ref] = value.ref REDIRECT_CACHE[original_ref] = value.ref end ref = value.ref @goto fetch end return something(value) end function _getlocal(id, remote) state = with_lock(()->datastore[id], datastore_lock) lock_read(state.lock) do if state.redirect !== nothing return RedirectTo(state.redirect) end return Some{Any}(read_from_device(state, id, true)) end end function datastore_delete(id) @safe_lock_spin datastore_counters_lock begin DEBUG_REFCOUNTING[] && _enqueue_work(Core.print, "-- (", myid(), ", ", id, ") with ", string(datastore_counters[(myid(), id)]), "\n"; gc_context=true) ctrs = datastore_counters[(myid(), id)] refcounters_replace!(ctrs) delete!(datastore_counters, (myid(), id)) end state = @safe_lock_spin datastore_lock begin haskey(datastore, id) ? datastore[id] : nothing end (state === nothing) && return errormonitor(Threads.@spawn begin device = storage_read(state).root if device !== nothing delete_from_device!(device, state, id) end end) @safe_lock_spin datastore_lock begin haskey(datastore, id) && delete!(datastore, id) end dtor = state.destructor if dtor !== nothing dtor() end return end ## DRef migration/redirection """ migrate!(ref::DRef, to::Integer) -> DRef Migrate the data referenced by `ref` to another worker `to`, returning the new `DRef` which references the new data. The existing `ref` is still usable, and any accesses via `poolget` will seamlessly redirect to the new `DRef`, but the data is no longer stored on the same worker as `ref`. """ function migrate!(ref::DRef, to::Integer) @assert ref.owner != to "Cannot migrate a DRef within the same worker" if ref.owner != myid() return remotecall_fetch(migrate!, ref.owner, ref, to) end state = with_lock(()->datastore[ref.id], datastore_lock) # Lock the ref against further accesses # FIXME: Below is racey w.r.t data mutation local new_ref @lock state.lock begin # Read the current value of the ref data = read_from_device(state, ref, true) # Create new ref to redirect to new_ref = remotecall_fetch(poolset, to, data) # Set the redirect to our new ref state.redirect = new_ref # Delete the local data delete_from_device!(state, ref) end return new_ref end struct RedirectTo ref::DRef end const REDIRECT_CACHE = WeakKeyDict{DRef,DRef}() const REDIRECT_CACHE_LOCK = ReadWriteLock() ## Default data directory const SESSION = Ref{String}() default_dir(p) = joinpath(homedir(), ".mempool", "$(SESSION[])-$p") default_dir() = default_dir(myid()) default_path(r::DRef) = joinpath(default_dir(r.owner), string(r.id)) ## Disk caching configuration """ DiskCacheConfig Helper struct that stores the config for the disk caching setup. Handles either direct input or uses the ENVs from the original implementation. Latest applied config can be found in the `DISKCACHE_CONFIG[]`. """ struct DiskCacheConfig toggle::Bool membound::Int diskpath::AbstractString diskdevice::StorageDevice diskbound::Int allocator_type::AbstractString evict_delay::Int end function DiskCacheConfig(; toggle::Union{Nothing,Bool}=nothing, membound::Union{Nothing,Int}=nothing, diskpath::Union{Nothing,AbstractString}=nothing, diskdevice::Union{Nothing,StorageDevice}=nothing, diskbound::Union{Nothing,Int}=nothing, allocator_type::Union{Nothing,AbstractString}=nothing, evict_delay::Union{Nothing,Int}=nothing, ) toggle = something(toggle, parse(Bool, get(ENV, "JULIA_MEMPOOL_EXPERIMENTAL_FANCY_ALLOCATOR", "0"))) membound = something(membound, parse(Int, get(ENV, "JULIA_MEMPOOL_EXPERIMENTAL_MEMORY_BOUND", repr(8*(1024^3))))) diskpath = something(diskpath, get(ENV, "JULIA_MEMPOOL_EXPERIMENTAL_DISK_CACHE", joinpath(default_dir(), randstring(6)))) diskdevice = something(diskdevice, SerializationFileDevice(FilesystemResource(), diskpath)) diskbound = something(diskbound, parse(Int, get(ENV, "JULIA_MEMPOOL_EXPERIMENTAL_DISK_BOUND", repr(32*(1024^3))))) allocator_type = something(allocator_type, get(ENV, "JULIA_MEMPOOL_EXPERIMENTAL_ALLOCATOR_KIND", "MRU")) evict_delay = something(evict_delay, parse(Int, get(ENV, "JULIA_MEMPOOL_EXIT_EVICT_DELAY", "0"))) allocator_type βˆ‰ ("LRU", "MRU") && throw(ArgumentError("Unknown allocator kind: $allocator_type. Available types: LRU, MRU")) return DiskCacheConfig( toggle, membound, diskpath, diskdevice, diskbound, allocator_type, evict_delay, ) end """ setup_global_device!(cfg::DiskCacheConfig) Sets up the `GLOBAL_DEVICE` with a `SimpleRecencyAllocator` according to the provided `cfg`. The latest applied config can be found in `DISKCACHE_CONFIG[]`. """ function setup_global_device!(cfg::DiskCacheConfig) if !cfg.toggle return nothing end if !isa(GLOBAL_DEVICE[], CPURAMDevice) # This detects if a disk cache was already set up @warn( "Setting the disk cache config when one is already set will lead to " * "unexpected behavior and likely cause issues. Please restart the process " * "before changing the disk cache configuration. " * "If this warning is unexpected you may need to " * "clear the `JULIA_MEMPOOL_EXPERIMENTAL_FANCY_ALLOCATOR` ENV." ) end GLOBAL_DEVICE[] = SimpleRecencyAllocator( cfg.membound, cfg.diskdevice, cfg.diskbound, Symbol(cfg.allocator_type), ) # Set the config to a global ref for future reference on exit or elsewhere DISKCACHE_CONFIG[] = cfg return nothing end # Stores the last applied disk cache config for future reference const DISKCACHE_CONFIG = Ref{DiskCacheConfig}()
MemPool
https://github.com/JuliaData/MemPool.jl.git
[ "MIT" ]
0.4.9
b15bc48d53bf910cb0dc59ec92d7c147eeda1de1
code
3539
if Sys.isunix() if !isdefined(Base, :diskstat) struct StatFSStruct f_bsize::Culong f_frsize::Culong f_blocks::Culong f_bfree::Culong f_bavail::Culong f_files::Culong f_ffree::Culong f_favail::Culong f_fsid::Culong f_flag::Culong f_namemax::Culong reserved::NTuple{6,Cint} end function statvfs(path::String) buf = Ref{StatFSStruct}() GC.@preserve buf begin errno = ccall(:statvfs, Cint, (Cstring, Ptr{StatFSStruct}), path, buf) if errno != 0 Base.systemerror(errno) end buf[] end end function disk_stats(path::String) vfs = statvfs(path) return ( available = vfs.f_bavail * vfs.f_bsize, capacity = vfs.f_blocks * vfs.f_bsize ) end end # diskstat struct MntEntStruct mnt_fsname::Cstring mnt_dir::Cstring mnt_type::Cstring mnt_opts::Cstring mnt_freq::Cint mnt_passno::Cint end struct MntEntry mnt_fsname::String mnt_dir::String mnt_type::String mnt_opts::String mnt_freq::Cint mnt_passno::Cint end function getmntent(;fstab::String="/etc/fstab") bufs = MntEntry[] mnt = ccall(:setmntent, Ptr{Cvoid}, (Cstring, Cstring), fstab, "r") Base.systemerror("Failed to setmntent at $fstab", UInt64(mnt) == 0) while true ret = ccall(:getmntent, Ptr{MntEntStruct}, (Ptr{Cvoid},), mnt) UInt64(ret) == 0 && break buf = unsafe_load(ret) push!(bufs, MntEntry( deepcopy(unsafe_string(buf.mnt_fsname)), deepcopy(unsafe_string(buf.mnt_dir)), deepcopy(unsafe_string(buf.mnt_type)), deepcopy(unsafe_string(buf.mnt_opts)), buf.mnt_freq, buf.mnt_passno )) end ccall(:endmntent, Cvoid, (Ptr{Cvoid},), mnt) bufs end mountpoints() = map(mnt->mnt.mnt_dir, getmntent()) elseif Sys.iswindows() if !isdefined(Base, :diskstat) function disk_stats(path::String) lpDirectoryName = path lpFreeBytesAvailableToCaller = Ref{Int64}(0) lpTotalNumberOfBytes = Ref{Int64}(0) lpTotalNumberOfFreeBytes = Ref{Int64}(0) r = ccall( (:GetDiskFreeSpaceExA, "kernel32"), Bool, (Cstring, Ref{Int64}, Ref{Int64}, Ref{Int64}), lpDirectoryName, lpFreeBytesAvailableToCaller, lpTotalNumberOfBytes, lpTotalNumberOfFreeBytes ) @assert r "Failed to query disk stats of $path" return ( available = lpFreeBytesAvailableToCaller[], capacity = lpTotalNumberOfBytes[] ) end end # diskstat function mountpoints() mounts = String[] path = Libc.malloc(256) mnt = ccall((:FindFirstVolumeW, "kernel32"), Ptr{Cvoid}, (Ptr{Cvoid}, Int), path, 256) Base.systemerror("Failed to FindFirstVolumeW at $path", UInt64(mnt) == 0) push!(mounts, unsafe_string(reinterpret(Cstring, path))) while true ret = ccall((:FindNextVolumeW, "kernel32"), Bool, (Ptr{Cvoid}, Ptr{Cvoid}, Int), mnt, path, 256) ret || break push!(mounts, unsafe_string(reinterpret(Cstring, path))) end ccall((:FindVolumeClose, "kernel32"), Cvoid, (Ptr{Cvoid},), mnt) Libc.free(path) mounts end else mountpoints() = error("Not implemented yet") if !isdefined(Base, :diskstat) disk_stats(path::String) = error("Not implemented yet") end end if isdefined(Base, :diskstat) function disk_stats(path::String) stats = Base.diskstat(path) return (available=stats.available, capacity=stats.total) end end
MemPool
https://github.com/JuliaData/MemPool.jl.git
[ "MIT" ]
0.4.9
b15bc48d53bf910cb0dc59ec92d7c147eeda1de1
code
6024
##### Array{T} ##### using Mmap struct MMSer{T} end # sentinel type used in deserialize to switch to our # custom memory-mapping deserializers can_mmap(io::IOStream) = true can_mmap(io::IO) = false function padalign(io::IOStream, align=8) align = (align + 7) & -8 # make sure alignment is a multiple of 8 p = position(io) if p & (align-1) != 0 write(io, zeros(UInt8, (align - (p % align)))) end end padalign(io, align=8) = nothing function seekpadded(io::IOStream, align=8) align = (align + 7) & -8 p = position(io) if p & (align-1) != 0 seek(io, p + (align - (p % align))) end end seekpadded(io, align=8) = nothing function deserialize(s::AbstractSerializer, ::Type{MMSer{T}}) where T mmread(T, s, can_mmap(s.io)) end mmwrite(io::IO, xs) = mmwrite(Serializer(io), xs) mmwrite(io::AbstractSerializer, xs) = serialize(io, xs) # fallback function mmwrite(io::AbstractSerializer, arr::A) where A<:Union{Array,BitArray} T = eltype(A) Serialization.serialize_type(io, MMSer{typeof(arr)}) if T<:Union{} || T==Missing serialize(io, size(arr)) return elseif isbitstype(T) serialize(io, size(arr)) padalign(io.io, sizeof(eltype(arr))) write(io.io, arr) return end fl = fixedlength(T) if fl > 0 serialize(io, size(arr)) for x in arr fast_write(io.io, x) end else serialize(io, arr) end end function mmread(::Type{A}, io, mmap) where A<:Union{Array,BitArray} T = eltype(A) if T<:Union{} || T==Missing sz = deserialize(io) return Array{T}(undef, sz) elseif isbitstype(T) sz = deserialize(io) seekpadded(io.io, sizeof(T)) if prod(sz) == 0 return A(undef, sz...) end if mmap data = Mmap.mmap(io.io, A, sz, position(io.io)) seek(io.io, position(io.io)+sizeof(data)) # move return data else data = Base.read!(io.io, A(undef, sz...)) return data end end fl = fixedlength(T) if fl > 0 sz = deserialize(io) arr = A(undef, sz...) @inbounds for i in eachindex(arr) arr[i] = fast_read(io.io, T)::T end return arr else return deserialize(io) # slow!! end end ##### Array{String} ##### const UNDEF_LENGTH = typemax(UInt32) # if your string is exactly 4GB you're out of luck function mmwrite(io::AbstractSerializer, xs::Array{String}) Serialization.serialize_type(io, MMSer{typeof(xs)}) lengths = UInt32[] buffer = UInt8[] serialize(io, size(xs)) # todo: write directly to buffer, but also mmap ptr = pointer(buffer) for i in 1:length(xs) if isassigned(xs, i) x = xs[i] l = sizeof(x) lb = length(buffer) push!(lengths, l) resize!(buffer, lb+l) unsafe_copyto!(pointer(buffer)+lb, pointer(x), l) ptr += l else push!(lengths, UNDEF_LENGTH) end end mmwrite(io, buffer) mmwrite(io, lengths) end function mmread(::Type{Array{String,N}}, io, mmap) where N sz = deserialize(io) buf = deserialize(io) lengths = deserialize(io) #@assert length(buf) == sum(filter(x->x>0, lengths)) #@assert prod(sz) == length(lengths) ys = Array{String,N}(undef, (sz...,)) # output ptr = pointer(buf) @inbounds for i = 1:length(ys) l = lengths[i] l == UNDEF_LENGTH && continue ys[i] = unsafe_string(ptr, l) ptr += l end ys end ## Optimized fixed length IO ## E.g. this is very good for `StaticArrays.MVector`s function fixedlength(t::Type, cycles=IdDict()) if isbitstype(t) return sizeof(t) elseif isa(t, UnionAll) || isabstracttype(t) || Base.isbitsunion(t) return -1 elseif isa(t, Union) && Base.argument_datatype(t) === nothing return -1 end if haskey(cycles, t) return -1 end cycles[t] = nothing lens = ntuple(i->fixedlength(fieldtype(t, i), copy(cycles)), fieldcount(t)) if isempty(lens) # e.g. abstract type / array type return -1 elseif any(x -> x < 0, lens) return -1 else return sum(lens) end end fixedlength(::Type{>:Missing}, cycles=nothing) = -1 fixedlength(::Type{<:String}, cycles=nothing) = -1 fixedlength(::Type{Union{}}, cycles=nothing) = -1 fixedlength(::Type{<:Ptr}, cycles=nothing) = -1 function gen_writer(::Type{T}, expr) where T @assert fixedlength(T) >= 0 "gen_writer must be called for fixed length eltypes" if T<:Tuple && isbitstype(T) :(write(io, Ref{$T}($expr))) elseif length(T.types) > 0 :(begin $([gen_writer(fieldtype(T, i), :(getfield($expr, $i))) for i=1:fieldcount(T)]...) end) elseif isbitstype(T) && sizeof(T) == 0 return :(begin end) elseif isbitstype(T) return :(write(io, $expr)) else error("Don't know how to serialize $T") end end function gen_reader(::Type{T}) where T @assert fixedlength(T) >= 0 "gen_reader must be called for fixed length eltypes" ex = if T<:Tuple if isbitstype(T) return :(read!(io, Ref{$T}())[]) else exprs = [gen_reader(fieldtype(T, i)) for i=1:fieldcount(T)] return :(tuple($(exprs...))) end elseif length(T.types) > 0 return :(ccall(:jl_new_struct, Any, (Any,Any...), $T, $([gen_reader(fieldtype(T, i)) for i=1:fieldcount(T)]...))) elseif isbitstype(T) && sizeof(T) == 0 return :(ccall(:jl_new_struct, Any, (Any,Any...), $T)) elseif isbitstype(T) return :($T; read(io, $T)) else error("Don't know how to deserialize $T") end return :($T; $ex) end @generated function fast_write(io, x) gen_writer(x, :x) end @generated function fast_read(io, ::Type{T}) where T gen_reader(T) end
MemPool
https://github.com/JuliaData/MemPool.jl.git
[ "MIT" ]
0.4.9
b15bc48d53bf910cb0dc59ec92d7c147eeda1de1
code
2232
# Copied from CUDA.jl/src/pool.jl """ NonReentrantLock Simple non-reentrant lock that errors when trying to reenter on the same task. """ struct NonReentrantLock # <: Base.AbstractLock rl::ReentrantLock NonReentrantLock() = new(ReentrantLock()) end function Base.lock(nrl::NonReentrantLock) @assert !islocked(nrl.rl) || nrl.rl.locked_by !== current_task() lock(nrl.rl) end function Base.trylock(nrl::NonReentrantLock) @assert !islocked(nrl.rl) || nrl.rl.locked_by !== current_task() trylock(nrl.rl) end Base.unlock(nrl::NonReentrantLock) = unlock(nrl.rl) if VERSION >= v"1.7.0-DEV" # as of v1.7 locks in Base disable finalizers enable_finalizers(on::Bool) = nothing else # NonReentrantLock may be taken around code that might call the GC, which might # reenter through finalizers. Avoid that by temporarily disabling finalizers # running concurrently on this thread. enable_finalizers(on::Bool) = ccall(:jl_gc_enable_finalizers, Cvoid, (Ptr{Cvoid}, Int32,), Core.getptls(), on) end macro safe_lock(l, ex) quote temp = $(esc(l)) lock(temp) enable_finalizers(false) try $(esc(ex)) finally unlock(temp) enable_finalizers(true) end end end # If we actually want to acquire a lock from a finalizer, we can't cause a task # switch. As a NonReentrantLock can only be taken by another thread that should # be running, and not a concurrent task we'd need to switch to, we can safely # spin. macro safe_lock_spin(l, ex) quote temp = $(esc(l)) while !trylock(temp) # we can't yield here GC.safepoint() end enable_finalizers(false) # retains compatibility with non-finalizer callers try $(esc(ex)) finally unlock(temp) enable_finalizers(true) end end end """ with_lock(f, lock, cond=true) Conditionally take lock `lock`, execute `f`, and unlock `lock`. If `!cond`, then `lock` is not taken or released. """ function with_lock(f, lock, cond=true) if cond @safe_lock lock begin f() end else f() end end
MemPool
https://github.com/JuliaData/MemPool.jl.git
[ "MIT" ]
0.4.9
b15bc48d53bf910cb0dc59ec92d7c147eeda1de1
code
4147
# Adapted from ConcurrentUtils/src/read_write_lock.jl import UnsafeAtomics abstract type AbstractReadWriteLock <: Base.AbstractLock end const NOTLOCKED = UInt64(0) const NREADERS_INC = UInt64(2) const WRITELOCK_MASK = UInt64(1) const NReadersAndWritelock = UInt64 mutable struct ReadWriteLock <: AbstractReadWriteLock @atomic nreaders_and_writelock::NReadersAndWritelock # TODO: use condition variables with lock-free notify const lock::ReentrantLock const cond_read::Threads.Condition const cond_write::Threads.Condition end function fieldoffset_by_name(T, field) for idx in 1:nfields(T) if fieldnames(T)[idx] == field return fieldoffset(T, idx) end end error("No such field for $T: $field") end const OFFSET_NREADERS_AND_WRITELOCK = fieldoffset_by_name(ReadWriteLock, :nreaders_and_writelock) function ReadWriteLock() lock = ReentrantLock() cond_read = Threads.Condition(lock) cond_write = Threads.Condition(lock) return ReadWriteLock(NOTLOCKED, lock, cond_read, cond_write) end # Not very efficient but lock-free function trylock_read(rwlock::ReadWriteLock; nspins = -∞, ntries = -∞) local ns::Int = 0 local nt::Int = 0 while true old = @atomic :monotonic rwlock.nreaders_and_writelock if iszero(old & WRITELOCK_MASK) # Try to acquire reader lock without the responsibility to receive or send the # notification: old, success = @atomicreplace( :acquire_release, :monotonic, rwlock.nreaders_and_writelock, old => old + NREADERS_INC, ) success && return true nt += 1 nt < ntries || return false end ns += 1 ns < nspins || return false end end function lock_read(rwlock::ReadWriteLock) # Using hardware FAA ptr = Ptr{NReadersAndWritelock}( pointer_from_objref(rwlock) + OFFSET_NREADERS_AND_WRITELOCK, ) GC.@preserve rwlock begin _, n = UnsafeAtomics.modify!(ptr, +, NREADERS_INC, UnsafeAtomics.acq_rel) end # n = @atomic :acquire_release rwlock.nreaders_and_writelock += NREADERS_INC if iszero(n & WRITELOCK_MASK) return end lock(rwlock.lock) do while true local n = @atomic :acquire rwlock.nreaders_and_writelock if iszero(n & WRITELOCK_MASK) @assert n > 0 return end wait(rwlock.cond_read) end end end function unlock_read(rwlock::ReadWriteLock) # Using hardware FAA ptr = Ptr{NReadersAndWritelock}( pointer_from_objref(rwlock) + OFFSET_NREADERS_AND_WRITELOCK, ) GC.@preserve rwlock begin _, n = UnsafeAtomics.modify!(ptr, -, NREADERS_INC, UnsafeAtomics.acq_rel) end # n = @atomic :acquire_release rwlock.nreaders_and_writelock -= NREADERS_INC @assert iszero(n & WRITELOCK_MASK) if iszero(n) lock(rwlock.lock) do notify(rwlock.cond_write; all = false) end end return end function Base.trylock(rwlock::ReadWriteLock) _, success = @atomicreplace( :acquire_release, :monotonic, rwlock.nreaders_and_writelock, NOTLOCKED => WRITELOCK_MASK, ) return success::Bool end function Base.lock(rwlock::ReadWriteLock) if trylock(rwlock) return end lock(rwlock.lock) do while true if trylock(rwlock) return end wait(rwlock.cond_write) end end end function Base.unlock(rwlock::ReadWriteLock) @assert !iszero(rwlock.nreaders_and_writelock & WRITELOCK_MASK) @atomic :acquire_release rwlock.nreaders_and_writelock &= ~WRITELOCK_MASK lock(rwlock.lock) do notify(rwlock.cond_read) notify(rwlock.cond_write; all = false) end return end ### ### High-level APIs ### lock_read(lck) = lock(lck) unlock_read(lck) = unlock(lck) function lock_read(f, lock) lock_read(lock) try return f() finally unlock_read(lock) end end
MemPool
https://github.com/JuliaData/MemPool.jl.git
[ "MIT" ]
0.4.9
b15bc48d53bf910cb0dc59ec92d7c147eeda1de1
code
44109
""" # Storage System Design ## Overview MemPool implements a data storage system for allowing automated transfers of DRef-associated data to storage media. The storage system is designed for the "swap-to-disk" use case, where some portion of data is kept on-disk, and swapped into memory as-needed, to support out-of-core operations. The storage system is designed to be performant, flexible, and user-configurable, allowing all kinds of definitions of "disk" to be implemented (including network stores, tape drives, etc.). The storage system is rooted on two abstract types, the `StorageResource`, and the `StorageDevice`. Briefly, a `StorageResource` represents some finite storage medium (such as a hard disk-backed filesystem, an S3 bucket, etc.), while a `StorageDevice` represents a mechanism by which data can be written to and read from an associated `StorageResource`. For example, the built-in `FilesystemResource` is a `StorageResource` which represents a mounted filesystem, while the `GenericFileDevice` is a `StorageDevice` which can store data on a `FilesystemResource` as a set of files written using user-provided serialization and deserialization methods. Other `StorageResource`s and `StorageDevice`s may be implemented by libraries or users to suit the specific storage medium and use case. All DRefs have an associated `StorageDevice`, which manages the reference's data. The global device is available at `GLOBAL_DEVICE[]`, and may be set by the user to whatever device is most desirable as a default. When a DRef is created with `poolset`, a "root" `StorageDevice` will be associated which manages the reference's data, either directly, or indirectly by managing other `StorageDevice`s. For example, the built-in `SimpleRecencyAllocator` can use assigned as the root device to manage a `CPURAMDevice` and a `GenericFileDevice` to provide automatic swap-to-disk in a least-recently-used fashion. ## Entrypoints The entrypoints of the storage system are: - `poolset` - Writes the data at DRef creation time - `poolget` - Reads the data to return it to the caller - GC finalization - Unaccessible DRefs cause the data to be deleted The associated internal storage functions are (respectively): - `write_to_device!` - Writes the data from memory to the device - `read_from_device` - Reads the data into memory from the device - `delete_from_device!` - Deletes the data from the device - `retain_on_device!` - Controls whether data is retained on device upon deletion The internal storage functions don't map exactly to the entrypoints; a `poolget` might require writing some unrelated data from memory to disk before the desired data can be read from disk to memory, and both existing data locations will then probably need to be deleted to minimize storage usage. ## Internal Consistency To allow for concurrent storage operations on unrelated data, the storage system uses a set of locks and atomics/read-copy-update to ensure safe and consistent access to data, while minimizing unnecessary wait time. Globally, the datastore maintains a single lock that protects access to the mapping from DRef ID to the associated `RefState` object. The `RefState` object contains everything necessary to access and manipulate the DRef's data, and the object is maintained for the lifetime of the DRef, allowing it to be cached and threaded through function to minimize datastore lock contention. This globally datastore lock is thus a short point of contention which should only rarely need to be accessed (ideally only once, and very briefly, per storage entrypoint). The `RefState` object contains some basic information about the data, including its estimated size in memory. Most importantly, it also points to a `StorageState` object, which contains all of the information relevant to storing and retrieving the data. The `StorageState` field of `RefState` is atomically accessed, ensuring it always points to a valid object. The `StorageState` itself contains fields for the root device, the "leaf" devices (the devices which physically performs reads/writes/deletes), the in-memory data (if any), and the on-device data (if any). The `StorageState`'s fields are not always safe to access; thus, they are protected by a field containing a `Base.Event`, which indicates when the other fields are safe to access. Once the event has been notified, all other fields may be safely read from. Any `getproperty` call to a `StorageState` field waits on this event to complete, which ensures that all in-flight operations have completed. In order to transition data to-and-from memory and disk, the `StorageState` contained in the `RefState` can be atomically swapped with a new one which represents the new state of the DRef's data. Making such a transition occurs with the `storage_rcu!` helper, which can safely construct a new `StorageState`, and install it as the new "source of truth". This helper uses the read-copy-update pattern, or RCU, to ensure that new `StorageState` is always based on the existing `StorageState`. Pairing with this helper is `storage_read`, which can safely read the current `StorageState` from its `RefState`. This setup allows devices to safely determine the current state of a DRef's data, and to ensure that all readers can see a fully-formed `StorageState` object. This also makes it easy to ensure that data is never accidentally lost by two conflicting transitions. However, it is important to note that by the time a field of a read `StorageState` is accessed, the current state of the DRef's data may have changed. This is not normally something to worry about; whatever `StorageState` was just read can treated as the (temporary) source of truth. Of course, `StorageState`s should thus never be cached or reused, as they can quickly become stale, and doing so might preserve excessive data. This system also has the benefit of providing concurrent and lazy storage operations; the `StorageState` for a `RefState` may be transitioned before the actual data transfer completes, and synchronization will only occur when the `StorageState` is later accessed. And any two tasks which operate on different DRefs (and which have their associated `RefState`s available) may never content with each other. ## Queries MemPool provides utilities to query `StorageResource`s and `StorageDevice`s for their capacity and current utilization. The `storage_capacity`, `storage_utilization`, and `storage_available` utilities work as expected on `StorageResource` objects, and return values in bytes. Additionally, because `StorageDevice`s can access multiple `StorageResource`s, the same utilities can also be optionally passed a `StorageResource` object to limit queries to that specific resource. ## Limits and Limitations Devices like the `SimpleRecencyAllocator` set byte limits on how much data may reside in memory and on disk. These limits are, unfortunately, not exact or guaranteed, because of the nature of Julia's GC, and an inability of MemPool to intercept user allocations. Thus, the ability of such devices to manage memory or report the correct amount of storage media utilization is limited, and should be taken as an approximation. Additionally, leaf devices may report a storage utilization level that varies according to external forces, such as by allocations from other processes on the system, or fluctuations in OS memory management. The `externally_varying` query on a `StorageDevice` will return `true` if the device is subject to such unpredictable variation. A result of `false` implies that the device is not aware of such variation, and lives in an "ideal world" where the device fully controls all storage variations; of course, this is an approximation of reality, and does not actually reflect how much of the physical resources are truly available. """ include("fsinfo.jl") """ StorageResource The supertype for all storage resources. Any subtype represents some storage hardware or modality (RAM, disk, NAS, VRAM, tape, etc.) where data can be stored, and which has some current usage level and a maximum capacity (both measures in bytes). Storage resources are generally unique (although they may alias), and do not represent a method of storing/loading data to/from the resource; instead, a `StorageDevice` provides an actual implementation of such operations on one or more resources. """ abstract type StorageResource end storage_available(::StorageResource) = 0 storage_capacity(::StorageResource) = 0 storage_utilized(s::StorageResource) = storage_capacity(s) - storage_available(s) "Represents CPU RAM." struct CPURAMResource <: StorageResource end if Sys.islinux() function free_memory() open("/proc/meminfo", "r") do io # TODO: Cache in TLS buf = zeros(UInt8, 128) readbytes!(io, buf) free = match(r"MemAvailable:\s*([0-9]*)\s.*", String(buf)).captures[1] return parse(UInt64, free) * 1024 end end else # FIXME: Sys.free_memory() includes OS caches free_memory() = Sys.free_memory() end storage_available(::CPURAMResource) = _query_mem_periodically(:available) storage_capacity(::CPURAMResource) = _query_mem_periodically(:capacity) mutable struct QueriedMemInfo value::UInt64 last_ns::UInt64 end QueriedMemInfo() = QueriedMemInfo(UInt64(0), UInt64(0)) const QUERY_MEM_AVAILABLE = Ref(QueriedMemInfo()) const QUERY_MEM_CAPACITY = Ref(QueriedMemInfo()) const QUERY_MEM_PERIOD = Ref(10 * 1000^2) # 10ms const QUERY_MEM_OVERRIDE = ScopedValue(false) function _query_mem_periodically(kind::Symbol) if !(kind in (:available, :capacity)) throw(ArgumentError("Invalid memory query kind: $kind")) end mem_bin = if kind == :available QUERY_MEM_AVAILABLE elseif kind == :capacity QUERY_MEM_CAPACITY end mem_info = mem_bin[] now_ns = fasttime() if QUERY_MEM_OVERRIDE[] || mem_info.last_ns < now_ns - QUERY_MEM_PERIOD[] if kind == :available new_mem_info = QueriedMemInfo(free_memory(), now_ns) elseif kind == :capacity new_mem_info = QueriedMemInfo(Sys.total_memory(), now_ns) end mem_bin[] = new_mem_info return new_mem_info.value else return mem_info.value end end "Represents a filesystem mounted at a given path." struct FilesystemResource <: StorageResource mountpoint::String end FilesystemResource() = FilesystemResource(Sys.iswindows() ? "C:" : "/") storage_available(s::FilesystemResource) = disk_stats(s.mountpoint).available storage_capacity(s::FilesystemResource) = disk_stats(s.mountpoint).capacity """ StorageDevice The abstract supertype of all storage devices. A `StorageDevice` must implement `movetodevice!`, `read_from_device`, and `delete_from_device!`. A `StorageDevice` may reflect a mechanism for storing data on persistent storage, or it may be an allocator which manages other `StorageDevice`s. See the `GenericFileDevice` for an example of how to implement a data store. When implementing an allocator, it's recommended to provide options that users can tune to control how many bytes of storage may be used for each managed `StorageDevice`. This makes it easier for users to predict the amount of storage that a given piece of code can use. See the `SimpleRecencyAllocator` for a (relatively) simple example of how to implement an allocator. """ abstract type StorageDevice end # TODO: Docstrings storage_available(dev::StorageDevice) = sum(res->storage_available(dev, res), storage_resources(dev); init=UInt64(0)) storage_capacity(dev::StorageDevice) = sum(res->storage_capacity(dev, res), storage_resources(dev); init=UInt64(0)) storage_utilized(dev::StorageDevice) = sum(res->storage_utilized(dev, res), storage_resources(dev); init=UInt64(0)) check_same_resource(dev::StorageDevice, expected::StorageResource, res::StorageResource) = (expected === res) || throw(ArgumentError("Invalid storage resource $res for device $dev")) storage_available(dev::StorageDevice, res::StorageResource) = throw(ArgumentError("Invalid storage resource $res for device $dev")) storage_capacity(dev::StorageDevice, res::StorageResource) = throw(ArgumentError("Invalid storage resource $res for device $dev")) storage_utilized(dev::StorageDevice, res::StorageResource) = throw(ArgumentError("Invalid storage resource $res for device $dev")) struct Tag tags::Dict{Type,Any} Tag(tags...) = new(Dict{Type,Any}(tags...)) Tag(::Nothing) = new(Dict{Type,Any}()) end const EMPTY_TAG = Tag(nothing) Tag() = EMPTY_TAG Base.getindex(tag::Tag, ::Type{device}) where {device<:StorageDevice} = get(tag.tags, device, nothing) mutable struct StorageLeaf # A low-level storage device device::StorageDevice # The handle associated with the device handle::Union{Some{Any}, Nothing} # Whether to retain the underlying data retain::Bool end StorageLeaf(device, handle) = StorageLeaf(device, handle, false) StorageLeaf(device) = StorageLeaf(device, nothing, false) "Safely copies a `Vector{StorageLeaf}` for later mutation." copy_leaves(leaves::Vector{StorageLeaf}) = StorageLeaf[StorageLeaf(leaf.device, leaf.handle, leaf.retain) for leaf in leaves] mutable struct StorageState # The in-memory value of the reference data::Union{Some{Any}, Nothing} # The low-level devices and handles physically storing the reference's values leaves::Vector{StorageLeaf} # The high-level device managing the reference's values root::StorageDevice # Notifies waiters when all fields become useable ready::Base.Event end StorageState(data, leaves, root) = StorageState(data, leaves, root, Base.Event()) StorageState(old::StorageState; data=old.data, leaves=old.leaves, root=old.root) = StorageState(data, leaves, root, Base.Event()) function Base.getproperty(sstate::StorageState, field::Symbol) if field == :ready return getfield(sstate, :ready) end wait(sstate.ready) return getfield(sstate, field) end Base.notify(sstate::StorageState) = notify(sstate.ready) Base.wait(sstate::StorageState) = wait(sstate.ready) mutable struct RefState # The storage state associated with the reference and its values @atomic storage::StorageState # The estimated size that the values of the reference take in memory size::UInt64 # Metadata to associate with the reference tag::Any leaf_tag::Tag # Destructor, if any destructor::Any # A Reader-Writer lock to protect access to this struct lock::ReadWriteLock # The DRef that this value may be redirecting to redirect::Union{DRef,Nothing} end RefState(storage::StorageState, size::Integer; tag=nothing, leaf_tag=Tag(), destructor=nothing) = RefState(storage, size, tag, leaf_tag, destructor, ReadWriteLock(), nothing) function Base.getproperty(state::RefState, field::Symbol) if field === :storage throw(ArgumentError("Cannot directly read `:storage` field of `RefState`\nUse `storage_read(state)` instead")) elseif field === :tag || field === :leaf_tag throw(ArgumentError("Cannot directly read `$(repr(field))` field of `RefState`\nUse `tag_read(state, storage_read(state), device)` instead")) end return getfield(state, field) end function Base.setproperty!(state::RefState, field::Symbol, value) if field === :storage throw(ArgumentError("Cannot directly write `:storage` field of `RefState`\nUse `storage_rcu!(f, state)` instead")) elseif field === :tag || field === :leaf_tag throw(ArgumentError("Cannot write `$(repr(field))` field of `RefState`")) end return setfield!(state, field, value) end Base.show(io::IO, state::RefState) = print(io, "RefState(size=$(Base.format_bytes(state.size)), tag=$(getfield(state, :tag)), leaf_tag=$(getfield(state, :leaf_tag)))") function tag_read(state::RefState, sstate::StorageState, device::StorageDevice) if sstate.root === device return getfield(state, :tag) end return getfield(state, :leaf_tag)[typeof(device)] end "Returns the size of the data for reference `id`." storage_size(ref::DRef) = (@assert ref.owner == myid(); storage_size(ref.id)) storage_size(id::Int) = storage_size(with_lock(()->datastore[id], datastore_lock)) storage_size(state::RefState) = state.size """ write_to_device!(device::StorageDevice, state::RefState, id::Int) Writes reference `id`'s data to `device`. Reads the data into memory first (via `read_from_device(CPURAMDevice(), id)`) if necessary. """ function write_to_device! end write_to_device!(state::RefState, ref::DRef) = write_to_device!(storage_read(state).root, state, ref.id) write_to_device!(device::StorageDevice, ref::DRef) = write_to_device!(device, with_lock(()->datastore[ref.id], datastore_lock), ref.id) write_to_device!(device::StorageDevice, state::RefState, ref::DRef) = write_to_device!(device, state, ref.id) write_to_device!(state::RefState, id::Int) = write_to_device!(storage_read(state).root, state, id) write_to_device!(device::StorageDevice, id::Int) = write_to_device!(device, with_lock(()->datastore[id], datastore_lock), id) """ read_from_device(device::StorageDevice, state::RefState, id::Int, ret::Bool) -> Any Access the value of reference `id` from `device`, and return it if `ret` is `true`; if `ret` is `false`, then the value is not actually retrieved, but internal counters may be updated to account for this access. Also ensures that the values for reference `id` are in memory; if necessary, they will be read from the reference's true storage device. """ function read_from_device end read_from_device(state::RefState, ref::DRef, ret::Bool) = read_from_device(storage_read(state).root, state, ref.id, ret) read_from_device(device::StorageDevice, ref::DRef, ret::Bool) = read_from_device(device, with_lock(()->datastore[ref.id], datastore_lock), ref.id, ret) read_from_device(device::StorageDevice, state::RefState, ref::DRef, ret::Bool) = read_from_device(device, state, ref.id, ret) read_from_device(state::RefState, id::Int, ret::Bool) = read_from_device(storage_read(state).root, state, id, ret) read_from_device(device::StorageDevice, id::Int, ret::Bool) = read_from_device(device, with_lock(()->datastore[id], datastore_lock), id, ret) """ delete_from_device!(device::StorageDevice, state::RefState, id::Int) Delete reference `id`'s data from `device`, such that upon return, the space used by the previously-referenced data is now available for allocating to other data. """ function delete_from_device! end delete_from_device!(state::RefState, ref::DRef) = delete_from_device!(storage_read(state).root, state, ref.id) delete_from_device!(device::StorageDevice, ref::DRef) = delete_from_device!(device, with_lock(()->datastore[ref.id], datastore_lock), ref.id) delete_from_device!(device::StorageDevice, state::RefState, ref::DRef) = delete_from_device!(device, state, ref.id) delete_from_device!(state::RefState, id::Int) = delete_from_device!(storage_read(state).root, state, id) delete_from_device!(device::StorageDevice, id::Int) = delete_from_device!(device, with_lock(()->datastore[id], datastore_lock), id) """ retain_on_device!(device::StorageDevice, state::RefState, id::Int, retain::Bool; all::Bool=false) Sets the retention state of reference `id` for `device`. If `retain` is `false` (the default when references are created), then data will be deleted from `device` upon a call to `delete_from_device!`; if `retain` is `true`, then the data will continue to exist on the device (if possible) upon a call to `delete_from_device!`. If the `all` kwarg is set to `true`, then any registered leaf devices will have their retain value set to `retain`. retain_on_device!(device::StorageDevice, retain::Bool) Sets the retention state of all references stored on `device` (if possible). """ function retain_on_device! end retain_on_device!(state::RefState, ref::DRef, retain::Bool; kwargs...) = retain_on_device!(storage_read(state).root, state, ref.id, retain; kwargs...) retain_on_device!(device::StorageDevice, ref::DRef, retain::Bool; kwargs...) = retain_on_device!(device, with_lock(()->datastore[ref.id], datastore_lock), ref.id, retain; kwargs...) retain_on_device!(device::StorageDevice, state::RefState, ref::DRef, retain::Bool; kwargs...) = retain_on_device!(device, state, ref.id, retain; kwargs...) retain_on_device!(state::RefState, id::Int, retain::Bool; kwargs...) = retain_on_device!(storage_read(state).root, state, id, retain; kwargs...) retain_on_device!(device::StorageDevice, id::Int, retain::Bool; kwargs...) = retain_on_device!(device, with_lock(()->datastore[id], datastore_lock), id, retain; kwargs...) function retain_on_device!(device::StorageDevice, state::RefState, id::Int, retain::Bool; all=false) notify(storage_rcu!(state) do sstate leaf_devices = map(l->l.device, sstate.leaves) devices = if all if device !== sstate.root retain_error_invalid_device(device) end leaf_devices else if findfirst(d->d===device, leaf_devices) === nothing retain_error_invalid_device(device) end StorageDevice[device] end leaves = copy_leaves(sstate.leaves) for device in devices idx = findfirst(l->l.device === device, leaves) if idx === nothing retain_error_invalid_device(device) end leaf = leaves[idx] leaves[idx] = StorageLeaf(leaf.device, leaf.handle, retain) end return StorageState(sstate; leaves) end) return end @inline retain_error_invalid_device(device) = throw(ArgumentError("Attempted to retain to invalid device: $device")) retain_on_device!(device::StorageDevice, retain::Bool) = nothing """ isretained(state::RefState, device::StorageDevice) -> Bool Returns `true` if the reference identified by `state` is retained on `device`; returns `false` otherwise. """ function isretained(state::RefState, device::StorageDevice) sstate = storage_read(state) for leaf in leaves if leaf.device === device && leaf.retain return true end end return false end isretained(id::Int, device::StorageDevice) = isretained(with_lock(()->datastore[id], datastore_lock), device) """ set_device!(device::StorageDevice, state::RefState, id::Int) Sets the root device for reference `id` to be `device`, writes the reference to that device, and waits for the write to complete. """ function set_device!(device::StorageDevice, state::RefState, id::Int) sstate = storage_read(state) old_device = sstate.root # Skip if the root is already set # FIXME: Why do we care if the leaf is set? if old_device === device && findfirst(l->l.device === device, sstate.leaves) !== nothing return end # Read from old device to ensure data is in memory read_from_device(old_device, state, id, false) # Switch root notify(storage_rcu!(state) do sstate StorageState(sstate; root=device) end) try # Write to new device write_to_device!(device, state, id) catch # Revert root switch notify(storage_rcu!(state) do sstate StorageState(sstate; root=old_device) end) rethrow() end return end set_device!(device::StorageDevice, id::Int) = set_device!(device, with_lock(()->datastore[id], datastore_lock), id) function set_device!(device::StorageDevice, ref::DRef) @assert ref.owner == myid() set_device!(device, ref.id) end function set_device!(device::StorageDevice, state::RefState, ref::DRef) @assert ref.owner == myid() set_device!(device, state, ref.id) end """ externally_varying(device::StorageDevice) -> Bool Indicates whether the storage availability or capacity of device `device may vary according to external forces, such as other unrelated processes or OS behavior. When `true`, this implies that the ability of `device to store data is completely arbitrary. Typically this means that calls to storage availability queries can return different results, even if no storage calls have been made on `device. When `false`, it may be reasonable to assume that exact accounting of storage availability is possible, although it is not guaranteed. There are also no guarantees that allocations will not trigger forced OS memory reclamation (such as by the Linux OOM killer). """ externally_varying(::StorageDevice) = true """ initial_leaf_device(device::StorageDevice) -> StorageDevice Returns the preferred initial leaf device for the root device `device`. Defaults to returning `device` itself. """ initial_leaf_device(device::StorageDevice) = device """ CPURAMDevice <: StorageDevice Stores data in memory. This is the default device. """ struct CPURAMDevice <: StorageDevice end storage_resources(dev::CPURAMDevice) = Set{StorageResource}([CPURAMResource()]) storage_capacity(::CPURAMDevice, res::CPURAMResource) = storage_capacity(res) storage_capacity(::CPURAMDevice) = storage_capacity(CPURAMResource()) storage_available(::CPURAMDevice, res::CPURAMResource) = storage_available(res) storage_available(::CPURAMDevice) = storage_available(CPURAMResource()) storage_utilized(::CPURAMDevice, res::CPURAMResource) = storage_utilized(res) storage_utilized(::CPURAMDevice) = storage_utilized(CPURAMResource()) function write_to_device!(device::CPURAMDevice, state::RefState, ref_id::Int) sstate = storage_read(state) if sstate.data === nothing data = read_from_device(first(sstate.leaves).device, state, ref_id, true) notify(storage_rcu!(state) do sstate StorageState(sstate; data=Some{Any}(data)) end) end return end function read_from_device(::CPURAMDevice, state::RefState, ref_id::Int, ret::Bool) if ret sstate = storage_read(state) if sstate.data === nothing # TODO: @assert !(sstate.leaf isa CPURAMDevice) "Data lost!" return read_from_device(first(sstate.leaves).device, state, ref_id, true) end return something(sstate.data) end end function delete_from_device!(::CPURAMDevice, state::RefState, ref_id::Int) notify(storage_rcu!(state) do sstate StorageState(sstate; data=nothing) end) return end isretained(state::RefState, ::CPURAMDevice) = false """ GenericFileDevice{S,D,F,M} <: StorageDevice Stores data in a file on a filesystem (within a specified directory), using `S` to serialize data and `D` to deserialize data. If `F` is `true`, then `S` and `D` operate on an `IO` object which is pointing to the file; if `false`, `S` and `D` operate on a `String` path to the file. If `M` is `true`, uses `MMWrap` to allowing memory mapping of data; if `false`, memory mapping is disabled. Also supports optional ser/des filtering stages, such as for compression or encryption. `filters` are an optional set of pairs of filtering functions to apply; the first in a pair is the serialization filter, and the second is the deserialization filter. A `String`-typed tag is supported to specify the path to save the file at, which should be relative to `dir`. """ struct GenericFileDevice{S,D,F,M} <: StorageDevice fs::FilesystemResource dir::String filters::Vector{Pair} end """ GenericFileDevice{S,D,F,M}(fs::FilesystemResource, dir::String; filters) where {S,D,F,M} GenericFileDevice{S,D,F,M}(dir::String; filters) where {S,D,F,M} GenericFileDevice{S,D,F,M}(; filters) where {S,D,F,M} Construct a `GenericFileDevice` which stores data in the directory `dir`. It is assumed that `dir` is located on the filesystem specified by `fs`, which is inferred if unspecified. If `dir` is unspecified, then files are stored in an arbitrary location. See [`GenericFileDevice`](@ref) for further details. """ GenericFileDevice{S,D,F,M}(fs, dir; filters=Pair[]) where {S,D,F,M} = GenericFileDevice{S,D,F,M}(fs, dir, filters) GenericFileDevice{S,D,F,M}(dir; filters=Pair[]) where {S,D,F,M} = GenericFileDevice{S,D,F,M}(FilesystemResource(Sys.iswindows() ? "C:" : "/"), dir, filters) # FIXME: FS path GenericFileDevice{S,D,F,M}(; filters=Pair[]) where {S,D,F,M} = GenericFileDevice{S,D,F,M}(joinpath(tempdir(), ".mempool"); filters) storage_resources(dev::GenericFileDevice) = Set{StorageResource}([dev.fs]) function storage_capacity(dev::GenericFileDevice, res::FilesystemResource) check_same_resource(dev, dev.fs, res) storage_capacity(res) end storage_capacity(dev::GenericFileDevice) = storage_capacity(dev.fs) function storage_available(dev::GenericFileDevice, res::FilesystemResource) check_same_resource(dev, dev.fs, res) storage_available(res) end storage_available(dev::GenericFileDevice) = storage_available(dev.fs) function storage_utilized(dev::GenericFileDevice, res::FilesystemResource) check_same_resource(dev, dev.fs, res) return storage_capacity(dev, res) - storage_available(dev, res) end storage_utilized(dev::GenericFileDevice) = storage_capacity(dev, dev.fs) - storage_available(dev, dev.fs) function write_to_device!(device::GenericFileDevice{S,D,F,M}, state::RefState, ref_id::Int) where {S,D,F,M} mkpath(device.dir) sstate = storage_read(state) data = sstate.data tag = tag_read(state, sstate, device) path = if tag !== nothing touch(joinpath(device.dir, tag)) else tempname(device.dir; cleanup=false) end fref = FileRef(path, state.size) if data === nothing data = read_from_device(first(sstate.leaves).device, state, ref_id, true) else data = something(data) end leaf = StorageLeaf(device) sstate = storage_rcu!(state) do sstate StorageState(sstate; leaves=vcat(sstate.leaves, leaf)) end errormonitor(Threads.@spawn begin if F open(path, "w+") do io for (write_filt, ) in reverse(device.filters) io = write_filt(io) end S(io, (M ? MMWrap : identity)(data)) close(io) end else if length(device.filters) > 0 throw(ArgumentError("Cannot use filters with $(typeof(device))\nPlease use a different device if filtering is required")) end S(path, data) end leaf.handle = Some{Any}(fref) notify(sstate) end) return end function read_from_device(device::GenericFileDevice{S,D,F,M}, state::RefState, id::Int, ret::Bool) where {S,D,F,M} sstate = storage_read(state) data = sstate.data if data !== nothing ret && return something(data) return end idx = findfirst(l->l.device === device, sstate.leaves) fref = something(sstate.leaves[idx].handle) sstate = storage_rcu!(state) do sstate StorageState(sstate) end errormonitor(Threads.@spawn begin data = if F open(fref.file, "r") do io for (_, read_filt) in reverse(device.filters) io = read_filt(io) end (M ? unwrap_payload : identity)(D(io)) end else if length(device.filters) > 0 throw(ArgumentError("Cannot use filters with $(typeof(device))\nPlease use a different device if filtering is required")) end D(fref.file) end sstate.data = Some{Any}(data) notify(sstate) end) if ret return something(sstate.data) end end function delete_from_device!(device::GenericFileDevice, state::RefState, id::Int) sstate = storage_read(state) idx = findfirst(l->l.device === device, sstate.leaves) idx === nothing && return leaf = sstate.leaves[idx] new_sstate = storage_rcu!(state) do sstate StorageState(sstate; leaves=filter(l->l.device !== device, sstate.leaves)) end if leaf.retain notify(new_sstate) else fref = something(leaf.handle) errormonitor(Threads.@spawn begin rm(fref.file; force=true) notify(new_sstate) end) end return end # For convenience and backwards-compatibility """ SerializationFileDevice === GenericFileDevice{serialize,deserialize,true,true} Stores data using the `Serialization` stdlib to serialize and deserialize data. See `GenericFileDevice` for further details. """ const SerializationFileDevice = GenericFileDevice{serialize,deserialize,true,true} """ SimpleRecencyAllocator <: StorageDevice A simple LRU allocator device which manages an `upper` device and a `lower` device. The `upper` device is be limited to `upper_limit` bytes of storage; when an allocation exceeds this limit, the least recently accessed data will be moved to the `lower` device (which is unbounded), and the new allocation will be moved to the `upper` device. Consider using an `upper` device of `CPURAMDevice` and a `lower` device of `GenericFileDevice` to implement a basic swap-to-disk allocator. Such a device will be created and used automatically if the environment variable `JULIA_MEMPOOL_EXPERIMENTAL_FANCY_ALLOCATOR` is set to `1` or `true`. """ struct SimpleRecencyAllocator <: StorageDevice mem_limit::UInt64 device::StorageDevice device_limit::UInt64 policy::Symbol # Most recently used elements are always at the front mem_refs::Vector{Int} device_refs::Vector{Int} # Cached sizes mem_size::Base.RefValue{UInt64} device_size::Base.RefValue{UInt64} # Counters for Hit, Miss, Evict stats::Vector{Int} # Whether to retain all tracked refs on the device retain::Base.RefValue{Bool} ref_cache::Dict{Int,RefState} lock::NonReentrantLock function SimpleRecencyAllocator(mem_limit, device, device_limit, policy; retain=false) mem_limit > 0 || throw(ArgumentError("Memory limit must be positive and non-zero: $mem_limit")) device_limit > 0 || throw(ArgumentError("Device limit must be positive and non-zero: $device_limit")) policy in (:LRU, :MRU) || throw(ArgumentError("Invalid recency policy: $policy")) return new(mem_limit, device, device_limit, policy, Int[], Int[], Ref(UInt64(0)), Ref(UInt64(0)), zeros(Int, 3), Ref(retain), Dict{Int,RefState}(), NonReentrantLock()) end end function Base.show(io::IO, sra::SimpleRecencyAllocator) mem_res = CPURAMResource() mem_used = Base.format_bytes(storage_utilized(sra, mem_res)) device_used = 0 for res in storage_resources(sra.device) device_used += storage_utilized(sra, res) end device_used = Base.format_bytes(device_used) mem_limit = Base.format_bytes(sra.mem_limit) device_limit = Base.format_bytes(sra.device_limit) println(io, "SimpleRecencyAllocator(mem: $mem_used used ($mem_limit max), device($(sra.device)): $device_used used ($device_limit max), policy: $(sra.policy))") print(io, " Stats: $(sra.stats[1]) Hits, $(sra.stats[2]) Misses, $(sra.stats[3]) Evicts") end storage_resources(sra::SimpleRecencyAllocator) = Set{StorageResource}([CPURAMResource(), storage_resources(sra.device)...]) function storage_capacity(sra::SimpleRecencyAllocator, res::StorageResource) if res isa CPURAMResource return sra.mem_limit elseif res in storage_resources(sra.device) return sra.device_limit else throw(ArgumentError("Invalid storage resource $res for device $sra")) end end storage_capacity(sra::SimpleRecencyAllocator) = sra.mem_limit + sra.device_limit storage_available(sra::SimpleRecencyAllocator, res::StorageResource) = storage_capacity(sra, res) - storage_utilized(sra, res) storage_available(sra::SimpleRecencyAllocator) = storage_capacity(sra) - storage_utilized(sra) function storage_utilized(sra::SimpleRecencyAllocator, res::StorageResource) if res isa CPURAMResource return sra.mem_size[] elseif res in storage_resources(sra.device) return sra.device_size[] else throw(ArgumentError("Invalid storage resource $res for device $sra")) end end storage_utilized(sra::SimpleRecencyAllocator) = sra.mem_size[] + sra.device_size[] function write_to_device!(sra::SimpleRecencyAllocator, state::RefState, ref_id::Int) with_lock(sra.lock) do sra.ref_cache[ref_id] = state end try if state.size > sra.mem_limit || state.size > sra.device_limit # Too bulky throw(ArgumentError("Cannot write ref $ref_id of size $(Base.format_bytes(state.size)) to LRU")) end sra_migrate!(sra, state, ref_id, missing) catch err with_lock(sra.lock) do delete!(sra.ref_cache, ref_id) end rethrow(err) end return end function sra_migrate!(sra::SimpleRecencyAllocator, state::RefState, ref_id, to_mem; read=false, locked=false) ref_size = state.size # N.B. "from" is the device where *other* refs are migrating from, # and go to the "to" device. The passed-in ref is inserted into the # "from" device. if ismissing(to_mem) # Try to minimize reads/writes. Note the type assertion to help the # compiler with inference, this removes some Base._any() invalidations. sstate::StorageState = storage_read(state) if sstate.data !== nothing # Try to keep it in memory to_mem = true elseif any(l->l.device === sra.device, sstate.leaves) # Try to leave it on device to_mem = false else # Fallback # TODO: Be smarter? to_mem = true end end with_lock(sra.lock, !locked) do ref_size = state.size if to_mem # Demoting to device from_refs = sra.mem_refs from_limit = sra.mem_limit from_device = CPURAMDevice() from_size = sra.mem_size to_refs = sra.device_refs to_limit = sra.device_limit to_device = sra.device to_size = sra.device_size else # Promoting to memory from_refs = sra.device_refs from_limit = sra.device_limit from_device = sra.device from_size = sra.device_size to_refs = sra.mem_refs to_limit = sra.mem_limit to_device = CPURAMDevice() to_size = sra.mem_size end idx = if sra.policy == :LRU to_mem ? lastindex(from_refs) : firstindex(from_refs) else to_mem ? firstindex(from_refs) : lastindex(from_refs) end if ref_id in from_refs # This ref is already where it needs to be @goto write_done end # Plan a batch of writes write_list = Int[] while ref_size + from_size[] > from_limit # Demote older/promote newer refs until space is available @assert 1 <= idx <= length(from_refs) "Failed to migrate $(Base.format_bytes(ref_size)) for ref $ref_id" oref = from_refs[idx] oref_state = sra.ref_cache[oref] oref_size = oref_state.size if (to_mem && sra.retain[]) || ((oref_size + to_size[] <= to_limit) && !isretained(oref_state, from_device)) # Retention is active while demoting to device, or else destination has space for this ref push!(write_list, idx) if !(!to_mem && sra.retain[]) from_size[] -= oref_size end to_size[] += oref_size end idx += (to_mem ? -1 : 1) * (sra.policy == :LRU ? 1 : -1) end if isempty(write_list) @goto write_ref end # Initiate writes @sync for idx in write_list @inbounds sra.stats[3] += 1 oref = from_refs[idx] oref_state = sra.ref_cache[oref] # N.B. We `write_to_device!` before deleting from old device, in case # the write fails (so we don't lose data) write_to_device!(to_device, oref_state, oref) if !(!to_mem && sra.retain[]) @async delete_from_device!(from_device, oref_state, oref) end end # Update counters to_delete = Int[] for oref in Iterators.map(idx->from_refs[idx], write_list) push!(to_refs, oref) if !(!to_mem && sra.retain[]) push!(to_delete, findfirst(==(oref), from_refs)) end end foreach(idx->deleteat!(from_refs, idx), reverse(to_delete)) @label write_ref # Space available, perform migration pushfirst!(from_refs, ref_id) from_size[] += state.size sstate = storage_read(state) if findfirst(l->l.device === from_device, sstate.leaves) === nothing # If this ref isn't already on the target device write_to_device!(from_device, state, ref_id) end # Write-through to device if retaining if to_mem && sra.retain[] write_to_device!(to_device, state, ref_id) end # If we already had this ref, delete it from previous device if !(to_mem && sra.retain[]) && findfirst(x->x==ref_id, to_refs) !== nothing delete_from_device!(to_device, state, ref_id) deleteat!(to_refs, findfirst(x->x==ref_id, to_refs)) to_size[] -= state.size end @label write_done if read return read_from_device(from_device, state, ref_id, true) end end end function read_from_device(sra::SimpleRecencyAllocator, state::RefState, id::Int, ret::Bool) with_lock(sra.lock) do idx = findfirst(x->x==id, sra.mem_refs) if idx !== nothing @inbounds sra.stats[1] += 1 deleteat!(sra.mem_refs, idx) pushfirst!(sra.mem_refs, id) return read_from_device(CPURAMDevice(), state, id, ret) end @assert id in sra.device_refs @inbounds sra.stats[2] += 1 value = sra_migrate!(sra, state, id, true; read=true, locked=true) if ret return value end end end function delete_from_device!(sra::SimpleRecencyAllocator, state::RefState, id::Int) with_lock(sra.lock) do if sra.retain[] # Migrate to device for retention sra_migrate!(sra, state, id, false; read=false, locked=true) end if (idx = findfirst(x->x==id, sra.mem_refs)) !== nothing delete_from_device!(CPURAMDevice(), state, id) deleteat!(sra.mem_refs, idx) sra.mem_size[] -= state.size end if (idx = findfirst(x->x==id, sra.device_refs)) !== nothing if !sra.retain[] delete_from_device!(sra.device, state, id) end deleteat!(sra.device_refs, idx) sra.device_size[] -= state.size end delete!(sra.ref_cache, id) end return end function retain_on_device!(sra::SimpleRecencyAllocator, retain::Bool) with_lock(sra.lock) do sra.retain[] = retain for id in sra.device_refs retain_on_device!(sra.device, sra.ref_cache[id], id, true) end for id in copy(sra.mem_refs) sra_migrate!(sra, sra.ref_cache[id], id, false; read=false, locked=true) retain_on_device!(sra.device, sra.ref_cache[id], id, true) end end end externally_varying(::SimpleRecencyAllocator) = false initial_leaf_device(sra::SimpleRecencyAllocator) = CPURAMDevice() const GLOBAL_DEVICE = Ref{StorageDevice}(CPURAMDevice())
MemPool
https://github.com/JuliaData/MemPool.jl.git
[ "MIT" ]
0.4.9
b15bc48d53bf910cb0dc59ec92d7c147eeda1de1
code
31707
include("testenv.jl") if nprocs() == 1 addprocs_with_testenv(2) end @everywhere ENV["JULIA_MEMPOOL_EXPERIMENTAL_FANCY_ALLOCATOR"] = "0" using Serialization, Random @everywhere using Sockets @everywhere using MemPool import MemPool: CPURAMDevice, SerializationFileDevice, SimpleRecencyAllocator import MemPool: storage_read using Test import Sockets: getipaddr function roundtrip(x, eq=(==), io=IOBuffer()) mmwrite(Serializer(io), x) y = deserialize(seekstart(io)) try @assert eq(y, x) @test eq(y, x) catch err println("Expected: ", x) println("Deserialized: ", y) rethrow(err) end end primitive type TestInt160 160 end @testset "Array" begin roundtrip(rand(10)) roundtrip(BitArray(rand(Bool,10))) roundtrip(map(Ref, rand(10)), (x,y)->getindex.(x) == getindex.(y)) mktemp() do path, f roundtrip(Vector{TestInt160}(undef, 10), (x,y)->true, f) end io = IOBuffer() x = Array{Union{}}(undef, 10) mmwrite(Serializer(io), x) y = deserialize(seekstart(io)) @test typeof(y) == Array{Union{},1} @test length(y) == 10 @test MemPool.fixedlength(Tuple{String, Int}) == -1 end using StaticArrays @testset "StaticArrays" begin x = [@MVector(rand(75)) for i=1:100] io = IOBuffer() mmwrite(Serializer(io), x) alloc = @allocated mmwrite(Serializer(seekstart(io)), x) @test deserialize(seekstart(io)) == x @test MemPool.approx_size(x) == 75*100*8 end @testset "Array{String}" begin roundtrip([randstring(rand(1:10)) for i=1:4]) sa = Array{String}(undef, 2) sa[1] = "foo" io = IOBuffer() mmwrite(Serializer(io), sa) sa2 = deserialize(seekstart(io)) @test sa2[1] == "foo" @test !isassigned(sa2, 2) end @testset "approx_size" begin @testset "approx_size $(typeof(x))" for x in ( "foo~^Γ₯&", SubString("aaaaa", 2), ) @test MemPool.approx_size(x) == Base.summarysize(x) end @testset "approx_size Symbol" begin s = Symbol("foo~^Γ₯&") @test MemPool.approx_size(s) == Base.summarysize(String(s)) end end mutable struct Empty end import Base: == ==(a::Empty, b::Empty) = true @testset "Array{Empty}" begin roundtrip([Empty() for i=1:4]) roundtrip([(Empty(),) for i=1:4]) end @testset "Array{Union{Nothing,Vector}}" begin roundtrip([nothing, Int[]]) @test MemPool.fixedlength(Union{Nothing,Vector{Int}}) == -1 # Issue #43. end @testset "DRef equality" begin d1 = poolset(1) d2 = copy(d1) @test d1 == d2 @test hash(d1) == hash(d2) d = Dict{DRef, Int}() d[d1] = 1 @test haskey(d, d1) @test haskey(d, d2) @test d[d2] == 1 end @testset "Set-Get" begin r1 = poolset([1,2]) r2 = poolset(["abc","def"], 2) r3 = poolset([Ref(1),Ref(2)], 2) @test poolget(r1) == [1,2] @test poolget(r2) == ["abc","def"] @test map(getindex, poolget(r3)) == [1,2] end @testset "Distributed reference counting" begin @testset "Owned locally" begin # Owned by us r1 = poolset([1,2]) key1 = (r1.owner, r1.id) id1 = r1.id # We know about this DRef @test haskey(MemPool.datastore_counters, key1) # We own it, and are told when others receive it, but it hasn't been passed around yet @test MemPool.datastore_counters[key1].worker_counter[] == 1 @test length(MemPool.datastore_counters[key1].recv_counters) == 0 # We hold a local reference to it @test MemPool.datastore_counters[key1].local_counter[] == 1 # We haven't sent it to anyone @test length(MemPool.datastore_counters[key1].send_counters) == 0 # They don't know about it @test fetch(@spawnat 2 !haskey(MemPool.datastore_counters, key1)) # Send them a copy @everywhere [2] begin # They know about this DRef @assert haskey(MemPool.datastore_counters, $key1) # They don't own it, and aren't told when others receive it @assert MemPool.datastore_counters[$key1].worker_counter[] == 0 @assert length(MemPool.datastore_counters[$key1].recv_counters) == 0 # They hold a local reference to it @assert MemPool.datastore_counters[$key1].local_counter[] == 1 # They haven't sent it to anyone @assert length(MemPool.datastore_counters[$key1].send_counters) == 0 # Here to ensure r1 is serialized and retained const r1_ref = Ref{Any}($r1) end # We've sent it to them and are tracking that @test MemPool.datastore_counters[key1].worker_counter[] == 2 # Delete their copy @everywhere [2] begin r1_ref[] = nothing GC.gc(); sleep(0.5) end GC.gc(); sleep(0.5) # They don't know about it (anymore) @test fetch(@spawnat 2 !haskey(MemPool.datastore_counters, key1)) # They're done with it and we're aware of that @test MemPool.datastore_counters[key1].worker_counter[] == 1 end @testset "Owned remotely" begin # Owned by worker 2 ("them") r2 = poolset([1,2], 2) key2 = (r2.owner, r2.id) id2 = r2.id # Give us some time to tell them we received r2 @everywhere GC.gc() sleep(1) # We know about this DRef @test haskey(MemPool.datastore_counters, key2) # We don't own it, and aren't told when others receive it @test MemPool.datastore_counters[key2].worker_counter[] == 0 @test length(MemPool.datastore_counters[key2].recv_counters) == 0 # We hold a local reference to it @test MemPool.datastore_counters[key2].local_counter[] == 1 # We haven't sent it to anyone (yet) @test length(MemPool.datastore_counters[key2].send_counters) == 0 @everywhere [2] begin # They know about this DRef @assert haskey(MemPool.datastore_counters, $key2) # They own it, and are told when others receive it (and we have received it, but they're already aware of that) @assert MemPool.datastore_counters[$key2].worker_counter[] >= 1 @assert length(MemPool.datastore_counters[$key2].recv_counters) == 0 # They don't hold a local reference to it @assert MemPool.datastore_counters[$key2].local_counter[] == 0 # They haven't sent it to anyone (recently) @assert length(MemPool.datastore_counters[$key2].send_counters) == 0 end # Send them a copy @everywhere [2] begin const r2_ref = Ref{Any}($r2) # They hold a local reference to it @assert MemPool.datastore_counters[$key2].local_counter[] == 1 # They know about our and their copies @assert MemPool.datastore_counters[$key2].worker_counter[] == 2 end # Delete our copy r2 = nothing @everywhere GC.gc() sleep(0.5) # We don't know about this DRef (anymore) @test_broken !haskey(MemPool.datastore_counters, key2) @test_skip "They only see their copy" #= @everywhere [2] begin # They only see their copy @assert MemPool.datastore_counters[$key2].worker_counter[] == 1 end =# # Delete their copy @everywhere [2] begin r2_ref[] = nothing GC.gc(); sleep(0.5) end @test_skip "They don't know about this DRef (anymore)" #= @everywhere [2] begin # They don't know about this DRef (anymore) @assert !haskey(MemPool.datastore_counters, $key2) end =# end end @testset "Destructors" begin ref_del = Ref(false) # @eval because testsets retain values in weird ways x = @eval Ref{Any}(poolset(123; destructor=()->(@assert !$ref_del[]; $ref_del[]=true;))) @test !ref_del[] x[] = nothing GC.gc(); yield() @test ref_del[] end @testset "Migration" begin A = WeakRef([1,2,3]) x = poolset(A.value) poolget(x) x_new = MemPool.migrate!(x, 2) @test x_new isa DRef @test x_new !== x @test x_new.owner == 2 @test poolget(x_new) == [1,2,3] @test poolget(x) == [1,2,3] @test MemPool.storage_read(MemPool.datastore[x.id]).data === nothing #= FIXME GC.gc(); yield() @test A.value === nothing =# end @testset "StorageState" begin sstate1 = MemPool.StorageState(nothing, MemPool.StorageLeaf[], CPURAMDevice()) @test sstate1 isa MemPool.StorageState @test sstate1.ready isa Base.Event @test !sstate1.ready.set notify(sstate1) @test sstate1.ready.set @test sstate1.root isa CPURAMDevice @test length(sstate1.leaves) == 0 @test sstate1.data === nothing @test sstate1 === sstate1 sstate2 = MemPool.StorageState(sstate1; data=Some{Any}(123)) @test sstate2 !== sstate1 @test sstate2.ready !== sstate1.ready @test !sstate2.ready.set notify(sstate2) @test sstate2.root isa CPURAMDevice @test length(sstate2.leaves) == 0 @test sstate2.data isa Some{Any} @test something(sstate2.data) == 123 x1 = poolset([1,2,3]) @test MemPool.isinmemory(x1) state = MemPool.datastore[x1.id] sstate = MemPool.storage_read(state) @test sstate === MemPool.storage_read(state) wait(sstate) @test sstate.ready.set @test length(sstate.leaves) == 0 @test sstate.root isa CPURAMDevice @test sstate.data isa Some{Any} @test something(sstate.data) == [1,2,3] sdevice = MemPool.SerializationFileDevice(joinpath(MemPool.default_dir(), randstring(6))) MemPool.set_device!(sdevice, x1) @test MemPool.isondisk(x1) @test MemPool.datastore[x1.id] === state new_sstate = MemPool.storage_read(state) @test sstate !== new_sstate wait(new_sstate) @test new_sstate.ready.set @test new_sstate.root === sdevice @test new_sstate.data isa Some{Any} @test length(new_sstate.leaves) == 1 leaf = first(new_sstate.leaves) @test leaf.device === sdevice @test leaf.handle isa Some{Any} @test something(leaf.handle) isa FileRef end @testset "RefState" begin sstate1 = MemPool.StorageState(Some{Any}(123), MemPool.StorageLeaf[], CPURAMDevice(), Base.Event()) notify(sstate1) state = MemPool.RefState(sstate1, 64; tag="abc", leaf_tag=MemPool.Tag(SerializationFileDevice=>123)) @test state.size == 64 @test MemPool.storage_size(state) == 64 @test_throws ArgumentError state.storage @test_throws ArgumentError state.tag @test_throws ArgumentError state.leaf_tag @test MemPool.tag_read(state, sstate1, CPURAMDevice()) == "abc" @test MemPool.tag_read(state, sstate1, SerializationFileDevice()) == 123 sstate2 = MemPool.storage_read(state) @test sstate2 isa MemPool.StorageState @test sstate2 === sstate1 @test_throws ArgumentError (state.storage = sstate1) sstate3 = MemPool.storage_rcu!(state) do old_sstate @test old_sstate === sstate1 # N.B. Not semantically correct to have CPURAMDevice as leaf leaf = MemPool.StorageLeaf(CPURAMDevice(), Some{Any}(456)) MemPool.StorageState(old_sstate; leaves=[leaf]) end @test !sstate3.ready.set notify(sstate3) @test sstate3 !== sstate1 @test sstate3.data isa Some{Any} @test something(sstate3.data) == 123 @test length(sstate3.leaves) == 1 leaf = first(sstate3.leaves) @test leaf.device isa CPURAMDevice @test leaf.handle isa Some{Any} @test something(leaf.handle) == 456 @test_throws ConcurrencyViolationError MemPool.storage_rcu!(old_sstate->old_sstate, state) end @testset "Tag" begin tag = MemPool.Tag() @test tag[SerializationFileDevice] == nothing tag = MemPool.Tag(SerializationFileDevice=>123) @test tag[SerializationFileDevice] == 123 @test tag[CPURAMDevice] === nothing tag = MemPool.Tag(SerializationFileDevice=>123, CPURAMDevice=>456) @test tag[SerializationFileDevice] == 123 @test tag[CPURAMDevice] == 456 @test tag[SimpleRecencyAllocator] === nothing end @testset "CPURAMDevice" begin # Delete -> read throws x = poolset(123) MemPool.delete_from_device!(CPURAMDevice(), x) @test_throws Exception poolget(x) end @testset "SerializationFileDevice" begin x1 = poolset([1,2,3]) state = MemPool.datastore[x1.id] data = something(storage_read(state).data) dirpath = mktempdir() sdevice = MemPool.SerializationFileDevice(dirpath) MemPool.set_device!(sdevice, x1.id) sstate = MemPool.storage_read(state) leaf = first(sstate.leaves) @test sstate.root === leaf.device === sdevice @test leaf.handle !== nothing @test something(leaf.handle) isa FileRef path = something(leaf.handle).file @test isdir(dirpath) @test isfile(path) @test normpath(joinpath(dirpath, basename(path))) == normpath(path) @test poolget(x1) == data # Retains the FileRef after read to memory @test first(storage_read(state).leaves).handle !== nothing MemPool.delete_from_device!(CPURAMDevice(), state, x1.id) @test storage_read(state).data === nothing @test poolget(x1) == data @test storage_read(state).data !== nothing MemPool.delete_from_device!(sdevice, state, x1) @test length(storage_read(state).leaves) == 0 # With retention, data is retained MemPool.set_device!(sdevice, state, x1) MemPool.retain_on_device!(sdevice, state, x1, true) sstate = storage_read(state) @test only(sstate.leaves).retain path = something(only(sstate.leaves).handle).file MemPool.delete_from_device!(sdevice, state, x1) @test length(storage_read(state).leaves) == 0 GC.gc(); sleep(1) # File is retained @test isfile(path) # Retention cannot be changed after deletion @test_throws ArgumentError MemPool.retain_on_device!(sdevice, state, x1, false) # Memory is retained @test storage_read(state).data !== nothing @test poolget(x1) == data # Without retention, data is lost MemPool.set_device!(sdevice, state, x1) MemPool.retain_on_device!(sdevice, state, x1, false) sstate = storage_read(state) @test !only(sstate.leaves).retain path = something(only(sstate.leaves).handle).file MemPool.delete_from_device!(sdevice, state, x1) @test length(storage_read(state).leaves) == 0 GC.gc(); sleep(1) # File is not retained @test !isfile(path) # Retention cannot be changed after deletion @test_throws ArgumentError MemPool.retain_on_device!(sdevice, state, x1, true) # Memory is retained @test storage_read(state).data !== nothing @test poolget(x1) == data @testset "Serialization Filters" begin struct BitOpSerializer{O,I} <: IO io::IO value::UInt8 out_op::O in_op::I end BitOpSerializer(io::IO, value::UInt8, op) = BitOpSerializer(io, value, op, op) Base.write(io::BitOpSerializer, x::UInt8) = write(io.io, io.out_op(x, io.value)) Base.read(io::BitOpSerializer, ::Type{UInt8}) = io.in_op(read(io.io, UInt8), io.value) Base.close(io::BitOpSerializer) = close(io.io) Base.eof(io::BitOpSerializer) = eof(io.io) # Symmetric filtering sdevice2 = SerializationFileDevice() push!(sdevice2.filters, (io->BitOpSerializer(io, 0x42, ⊻))=>(io->BitOpSerializer(io, 0x42, ⊻))) x1 = poolset(UInt8(123); device=sdevice2) MemPool.delete_from_device!(CPURAMDevice(), x1) path = something(only(storage_read(MemPool.datastore[x1.id]).leaves).handle).file # Filter is applied on-disk iob = IOBuffer(); serialize(iob, UInt8(123)); seek(iob, 0) @test read(path, UInt8) == first(take!(iob)) ⊻ 0x42 # Filter is undone on read @test poolget(x1) == UInt8(123) # Asymmetric filtering sdevice2 = SerializationFileDevice() push!(sdevice2.filters, (io->BitOpSerializer(io, 0x42, +, -))=>(io->BitOpSerializer(io, 0x42, +, -))) x1 = poolset(UInt8(123); device=sdevice2) MemPool.delete_from_device!(CPURAMDevice(), x1) path = something(only(storage_read(MemPool.datastore[x1.id]).leaves).handle).file # Filter is applied on-disk before serialization iob = IOBuffer(); serialize(iob, UInt8(123)); seek(iob, 0) @test read(path, UInt8) == first(take!(iob)) + 0x42 # Filter is undone on read @test poolget(x1) == UInt8(123) # Chained filtering sdevice2 = SerializationFileDevice() push!(sdevice2.filters, (io->BitOpSerializer(io, 0x3, +, -))=>(io->BitOpSerializer(io, 0x3, +, -))) push!(sdevice2.filters, (io->BitOpSerializer(io, 0x5, ⊻))=>(io->BitOpSerializer(io, 0x5, ⊻))) x1 = poolset(UInt8(123); device=sdevice2) MemPool.delete_from_device!(CPURAMDevice(), x1) path = something(only(storage_read(MemPool.datastore[x1.id]).leaves).handle).file # Filter is applied on-disk before serialization iob = IOBuffer(); serialize(iob, UInt8(123)); seek(iob, 0) value = first(take!(iob)) @test read(path, UInt8) == (value + 0x3) ⊻ 0x5 @test read(path, UInt8) != (value ⊻ 0x5) + 0x3 # Filter is undone on read @test poolget(x1) == UInt8(123) end @testset "Custom File Name" begin tag = "myfile.bin" ref = poolset(123; device=sdevice, tag) @test isfile(joinpath(dirpath, tag)) ref = nothing; GC.gc(); sleep(0.5) @test !isfile(joinpath(dirpath, tag)) end end sra_upper_pos(sra, ref) = findfirst(x->x==ref.id, sra.mem_refs) sra_lower_pos(sra, ref) = findfirst(x->x==ref.id, sra.device_refs) sra_inmem_pos(sra, ref, idx) = MemPool.isinmemory(ref) && !MemPool.isondisk(ref) && sra_upper_pos(sra, ref) == idx && sra_lower_pos(sra, ref) === nothing sra_ondisk_pos(sra, ref, idx) = !MemPool.isinmemory(ref) && MemPool.isondisk(ref) && sra_upper_pos(sra, ref) === nothing && sra_lower_pos(sra, ref) == idx @testset "SimpleRecencyAllocator" begin sdevice = SerializationFileDevice() # Garbage policy throws on creation @test_throws ArgumentError SimpleRecencyAllocator(1, sdevice, 1, :blah) # Memory and disk limits must be positive and non-zero @test_throws ArgumentError SimpleRecencyAllocator(0, sdevice, 1, :LRU) @test_throws ArgumentError SimpleRecencyAllocator(1, sdevice, 0, :LRU) @test_throws ArgumentError SimpleRecencyAllocator(0, sdevice, -1, :LRU) @test_throws ArgumentError SimpleRecencyAllocator(-1, sdevice, 0, :LRU) for sra in [MemPool.SimpleRecencyAllocator(8*10, sdevice, 8*10_000, :LRU), MemPool.SimpleRecencyAllocator(8*10, sdevice, 8*10_000, :MRU)] r1 = poolset([1,2]; device=sra) r2 = poolset([1,2,3]; device=sra) r3 = poolset([1,2,3,4,5]; device=sra) @test sra_inmem_pos(sra, r3, 1) @test sra_inmem_pos(sra, r2, 2) @test sra_inmem_pos(sra, r1, 3) for ref in [r1, r2, r3] @test haskey(sra.ref_cache, ref.id) @test sra.ref_cache[ref.id] === MemPool.datastore[ref.id] end # Add a ref that causes an eviction r4 = poolset([1,2]; device=sra) @test sra_inmem_pos(sra, r4, 1) if sra.policy == :LRU @test sra_inmem_pos(sra, r3, 2) @test sra_inmem_pos(sra, r2, 3) @test sra_ondisk_pos(sra, r1, 1) else @test sra_inmem_pos(sra, r2, 2) @test sra_inmem_pos(sra, r1, 3) @test sra_ondisk_pos(sra, r3, 1) end # Make an in-memory ref the most recently used @test poolget(r2) == [1,2,3] @test sra_inmem_pos(sra, r2, 1) if sra.policy == :LRU @test sra_inmem_pos(sra, r4, 2) @test sra_inmem_pos(sra, r3, 3) @test sra_ondisk_pos(sra, r1, 1) else @test sra_inmem_pos(sra, r4, 2) @test sra_inmem_pos(sra, r1, 3) @test sra_ondisk_pos(sra, r3, 1) end # Make an on-disk ref the most recently used if sra.policy == :LRU @test poolget(r1) == [1,2] @test sra_inmem_pos(sra, r1, 1) @test sra_inmem_pos(sra, r2, 2) @test sra_inmem_pos(sra, r4, 3) @test sra_ondisk_pos(sra, r3, 1) else @test poolget(r3) == [1,2,3,4,5] @test sra_inmem_pos(sra, r3, 1) @test sra_inmem_pos(sra, r4, 2) @test sra_inmem_pos(sra, r1, 3) @test sra_ondisk_pos(sra, r2, 1) end # Delete a ref that was in memory prev_mem_refs = copy(sra.mem_refs) prev_device_refs = copy(sra.device_refs) local del_id # FIXME: Somehow refs get retained? if sra.policy == :LRU del_id = r1.id #r1 = nothing MemPool.delete_from_device!(sra, r1) else del_id = r3.id #r3 = nothing MemPool.delete_from_device!(sra, r3) end @test_broken !haskey(MemPool.datastore, del_id) @test !in(del_id, sra.mem_refs) @test !in(del_id, sra.device_refs) @test !haskey(sra.ref_cache, del_id) @test sra.mem_refs == filter(id->id != del_id, prev_mem_refs) @test sra.device_refs == prev_device_refs # Delete a ref that was on disk prev_mem_refs = copy(sra.mem_refs) prev_device_refs = copy(sra.device_refs) if sra.policy == :LRU del_id = r3.id #r3 = nothing MemPool.delete_from_device!(sra, r3) else del_id = r2.id #r2 = nothing MemPool.delete_from_device!(sra, r2) end @test_broken !haskey(MemPool.datastore, del_id) @test !in(del_id, sra.mem_refs) @test !in(del_id, sra.device_refs) @test !haskey(sra.ref_cache, del_id) @test sra.mem_refs == prev_mem_refs @test sra.device_refs == filter(id->id != del_id, prev_device_refs) # Try to add a bulky object that doesn't fit in memory prev_mem_refs = copy(sra.mem_refs) prev_device_refs = copy(sra.device_refs) r7 = poolset(collect(1:11)) @test_throws ArgumentError MemPool.set_device!(sra, r7) @test sra.mem_refs == prev_mem_refs @test sra.device_refs == prev_device_refs @test !haskey(sra.ref_cache, r7.id) # Add a bulky object that doesn't fit in memory or on disk prev_mem_refs = sra.mem_refs prev_device_refs = sra.device_refs r8 = poolset(collect(1:10_001)) @test_throws ArgumentError MemPool.set_device!(sra, r8) @test sra.mem_refs == prev_mem_refs @test sra.device_refs == prev_device_refs @test !haskey(sra.ref_cache, r8.id) end # Whole-device retention applies to all objects dirname = mktempdir() sdevice2 = SerializationFileDevice(dirname) sra = MemPool.SimpleRecencyAllocator(8*10, sdevice2, 8*10_000, :LRU) refs = [poolset(123; device=sra) for i in 1:8] @test isempty(sra.device_refs) MemPool.retain_on_device!(sra, true) # Retention is immediate @test !isempty(sra.device_refs) @test length(readdir(dirname)) == 8 # Files still exist after refs expire refs = nothing; GC.gc(); sleep(0.5) @test isempty(sra.mem_refs) @test isempty(sra.device_refs) @test length(readdir(dirname)) == 8 # Counters are properly cleared (https://github.com/JuliaParallel/DTables.jl/issues/60) sra = MemPool.SimpleRecencyAllocator(8*10, sdevice2, 8*10_000, :LRU) function generate() poolset(collect(1:10); device=sra) poolset(collect(1:10); device=sra) return end generate() @test sra.mem_size[] > 0 @test sra.device_size[] > 0 for _ in 1:3 GC.gc() yield() end @test sra.mem_size[] == 0 @test sra.device_size[] == 0 end @testset "Mountpoints and Disk Stats" begin mounts = MemPool.mountpoints() @test mounts isa Vector{String} @test length(mounts) > 0 if Sys.iswindows() @test "C:" in mounts else @test "/" in mounts end for mount in mounts if ispath(mount) stats = MemPool.disk_stats(mount) @test stats.available > 0 @test stats.capacity > 0 @test stats.available < stats.capacity end end end @testset "High-level APIs" begin sdevice = SerializationFileDevice() devices = [CPURAMDevice(), sdevice, SimpleRecencyAllocator(8, sdevice, 1024^3, :LRU)] # Resource queries work correctly for device in devices resources = MemPool.storage_resources(device) @test length(resources) >= 1 @test length(unique(resources)) == length(resources) for resource in resources @test MemPool.storage_capacity(device, resource) > 0 @test MemPool.storage_available(device, resource) >= 0 @test MemPool.storage_utilized(device, resource) >= 0 @test MemPool.externally_varying(device) isa Bool if !MemPool.externally_varying(device) @test MemPool.storage_capacity(device, resource) == MemPool.storage_available(device, resource) + MemPool.storage_utilized(device, resource) end end # Wrong resource passed to resource queries throw errors wrong_res = MemPool.FilesystemResource("/fake/path") @test_throws ArgumentError MemPool.storage_capacity(device, wrong_res) @test_throws ArgumentError MemPool.storage_available(device, wrong_res) @test_throws ArgumentError MemPool.storage_utilized(device, wrong_res) end # All APIs accept either DRef, ref ID, or RefState as identifier # Either state or device may be passed # Redundant set, read, write, retain, delete are allowed # Non-read calls return nothing # set_device! sets root and leaf for device in devices x1 = poolset(123) state = MemPool.datastore[x1.id] # set_device! requires access to ref ID and device to set @test MemPool.set_device!(device, x1) === MemPool.set_device!(device, state, x1) === MemPool.set_device!(device, x1.id) === MemPool.set_device!(device, state, x1.id) === nothing @test MemPool.isinmemory(x1) == MemPool.isinmemory(x1.id) == MemPool.isinmemory(state) @test MemPool.isondisk(x1) == MemPool.isondisk(x1.id) == MemPool.isondisk(state) # Root and leaf are set appropriately sstate = storage_read(state) @test sstate.root === device if device isa CPURAMDevice @test length(sstate.leaves) == 0 elseif device isa SerializationFileDevice @test length(sstate.leaves) == 1 @test first(sstate.leaves).device === device elseif device isa SimpleRecencyAllocator x2 = poolset(456; device) # to push x1 to disk @test first(storage_read(state).leaves).device !== device end @test MemPool.read_from_device(state, x1, true) == MemPool.read_from_device(device, x1, true) == MemPool.read_from_device(device, state, x1, true) == MemPool.read_from_device(state, x1.id, true) == MemPool.read_from_device(device, x1.id, true) == MemPool.read_from_device(device, state, x1.id, true) # When ret == false, nothing is returned @test MemPool.read_from_device(state, x1, false) === MemPool.read_from_device(device, x1, false) === MemPool.read_from_device(device, state, x1, false) === MemPool.read_from_device(state, x1.id, false) === MemPool.read_from_device(device, x1.id, false) === MemPool.read_from_device(device, state, x1.id, false) === nothing @test MemPool.write_to_device!(state, x1) === MemPool.write_to_device!(device, x1) === MemPool.write_to_device!(device, state, x1) === MemPool.write_to_device!(state, x1.id) === MemPool.write_to_device!(device, x1.id) === MemPool.write_to_device!(device, state, x1.id) === nothing for mode in [true, false] @test MemPool.retain_on_device!(state, x1, mode; all=true) === MemPool.retain_on_device!(device, x1, mode; all=true) === MemPool.retain_on_device!(device, state, x1, mode; all=true) === MemPool.retain_on_device!(state, x1.id, mode; all=true) === MemPool.retain_on_device!(device, x1.id, mode; all=true) === MemPool.retain_on_device!(device, state, x1.id, mode; all=true) === nothing end @test MemPool.delete_from_device!(state, x1) === MemPool.delete_from_device!(device, x1) === MemPool.delete_from_device!(device, state, x1) === MemPool.delete_from_device!(state, x1.id) === MemPool.delete_from_device!(device, x1.id) === MemPool.delete_from_device!(device, state, x1.id) === nothing # Delete clears all leaf devices @test length(storage_read(state).leaves) == 0 end # Garbage ref IDs passed to APIs which don't take a RefState always throw for device in devices @test_throws Exception MemPool.set_device!(device, typemax(Int)) @test_throws Exception MemPool.read_from_device(device, typemax(Int), true) @test_throws Exception MemPool.write_to_device!(device, typemax(Int)) @test_throws Exception MemPool.delete_from_device!(device, typemax(Int)) end @testset "set_device! failure" begin # N.B. That this device doesn't fully conform to semantics struct FailingWriteStorageDevice <: MemPool.StorageDevice end MemPool.write_to_device!(::FailingWriteStorageDevice, ::MemPool.RefState, ::Any) = error("Failed to write") # Allocate directly throws and does not have datastore entry GC.gc(); sleep(1) len = length(MemPool.datastore) @test_throws ErrorException poolset(123; device=FailingWriteStorageDevice()) GC.gc(); sleep(1) @test length(MemPool.datastore) == len # Allocate, then set, throws and has datastore entry x = poolset(123) @test_throws ErrorException MemPool.set_device!(FailingWriteStorageDevice(), x) @test haskey(MemPool.datastore, x.id) end # Retention can be set in poolset x1 = poolset(123; device=sdevice) @test !only(storage_read(MemPool.datastore[x1.id]).leaves).retain x1 = poolset(123; device=sdevice, retain=true) @test only(storage_read(MemPool.datastore[x1.id]).leaves).retain MemPool.retain_on_device!(sdevice, x1, false) end #= TODO Allocate, write non-CPU A, write non-CPU B, handle for A was explicitly deleted Allocate, chain write and reads such that write starts before, and finishes after, read, ensure ordering is correct Stress RCU with many readers and one write-delete-read task =#
MemPool
https://github.com/JuliaData/MemPool.jl.git
[ "MIT" ]
0.4.9
b15bc48d53bf910cb0dc59ec92d7c147eeda1de1
code
1282
# This file is a part of Julia. License is MIT: https://julialang.org/license # This includes a few helper variables and functions that provide information about the # test environment (command line flags, current module, etc). # This file can be included multiple times in the same module if necessary, # which can happen with unisolated test runs. using Distributed if !(@isdefined testenv_defined) const testenv_defined = true if haskey(ENV, "JULIA_TEST_EXEFLAGS") const test_exeflags = `$(Base.shell_split(ENV["JULIA_TEST_EXEFLAGS"]))` else inline_flag = Base.JLOptions().can_inline == 1 ? `` : `--inline=no` cov_flag = `` if Base.JLOptions().code_coverage == 1 cov_flag = `--code-coverage=user` elseif Base.JLOptions().code_coverage == 2 cov_flag = `--code-coverage=all` end const test_exeflags = `$cov_flag $inline_flag --check-bounds=yes --startup-file=no --depwarn=error` end if haskey(ENV, "JULIA_TEST_EXENAME") const test_exename = `$(Base.shell_split(ENV["JULIA_TEST_EXENAME"]))` else const test_exename = Base.julia_cmd() end addprocs_with_testenv(X; kwargs...) = addprocs(X; exename=test_exename, exeflags=test_exeflags, kwargs...) end
MemPool
https://github.com/JuliaData/MemPool.jl.git
[ "MIT" ]
0.4.9
b15bc48d53bf910cb0dc59ec92d7c147eeda1de1
docs
275
# MemPool [![Build Status](https://travis-ci.org/JuliaData/MemPool.jl.svg?branch=master)](https://travis-ci.org/JuliaData/MemPool.jl) Flexible, high-performance datastore that supports custom serialization, with support for spilling unused data to disk and memory-mapping.
MemPool
https://github.com/JuliaData/MemPool.jl.git
[ "MIT" ]
0.1.1
7cf8421732ddbae810c2f230613a2b6ca0c928fe
code
358
module Fredholm using LinearAlgebra: I, β‹…, norm, Diagonal, mul! using ForwardDiff using NonNegLeastSquares abstract type AutoRegMethod end abstract type AbstractRegularization{T} end include("regularizations.jl") include("autoreg.jl") include("solve.jl") export invert, XuPei,LCurve,Tikhonov,SecondDerivative end
Fredholm
https://github.com/MatFi/Fredholm.jl.git
[ "MIT" ]
0.1.1
7cf8421732ddbae810c2f230613a2b6ca0c928fe
code
3370
struct LCurve{T} <: AutoRegMethod reg::T end function auto_reg(t,y,s,kernel::Function,method::LCurve;yoffset=true,tdomain=:realplus) creg = typeof(method.reg).name.wrapper if method.reg isa UnionAll Ξ»_ini = 1 creg = method.reg else Ξ»_ini = method.reg.Ξ± creg = typeof(method.reg).name.wrapper end function obj(Ξ») reg=creg(Ξ») AR,yr = build_ar(t,y,s,kernel,reg,yoffset) #TODO: could reduce allocs , problem with forward diff # p=load!(o.problem, AR,yr) sy = get_yt(AR,yr;tdomain=tdomain) ρ = (AR*sy)[1:length(y)].-y |>norm Ξ· = sy[1:end-1] |> norm return (ρ ,Ξ·) end #calculate curvatur function k(Ξ») dd=ForwardDiff.Dual{:a}(Ξ»,one(1.)) o=obj(dd) Ξ· = o[2].value ρ = o[1].value dΞ· = o[2].partials[1] c= -2*Ξ·*ρ/dΞ·*(Ξ»^2*dΞ·*ρ + 2*Ξ»*Ξ·*ρ + Ξ»^4*Ξ·*dΞ·)/((Ξ»^2*Ξ·^2+ ρ^2)^3/2 ) @debug "L-curvature" ΞΊ=c Ξ»=Ξ» # return c #supress negative curvatures if c<0 c=exp(c) else c=c+1 end return c end # return k(Ξ»_ini) #optimize on log scales Ξ»_opt = 10. ^gradient_decent(x ->log(1/k(10.0 ^x)),ini=log10(Ξ»_ini),d_ini=log10(Ξ»_ini)) @debug "found Ξ»" Ξ»=Ξ»_opt return creg( Ξ»_opt) end function gradient_decent(f;ini=1e-5,d_ini=ini,maxiters=40) k=f Ξ±_old =ini Ξ±_cur =ini+1 Ξ±_new =ini+1 dual=ForwardDiff.Dual{:c}(Ξ±_old,one(1.)) kevel= k(dual) Ξ”_old = kevel.partials[1] dual=ForwardDiff.Dual{:c}(Ξ±_new,one(1.)) kevel= k(dual) Ξ”_cur = kevel.partials[1] k_best= kevel.value k_cur=k_best Ξ±_opt=Ξ±_old for i in 1:maxiters Ξ±_cur=Ξ±_new dual=ForwardDiff.Dual{:c}(Ξ±_cur,one(1.)) kn= k(dual) Ξ”_cur = kn.partials[1] k_cur= kn.value # save best values if k_cur< k_best k_best=k_cur Ξ±_opt=Ξ±_cur end Ξ³=(abs(Ξ±_old-Ξ±_cur)*abs(Ξ”_cur-Ξ”_old))/((Ξ”_cur-Ξ”_old)^2+eps(Float64))#+1e-4 Ξ±0=1 Ξ±_new= clamp(Ξ±_cur-Ξ³*Ξ”_cur,Ξ±_cur-Ξ±0,Ξ±_cur+Ξ±0) #Ξ±_new=clamp(Ξ±_cur-Ξ”_cur,Ξ±_cur-Ξ±0,Ξ±_cur+Ξ±0) Ξ±_old=Ξ±_cur Ξ”_old=Ξ”_cur i>1 &&Ξ³<0.01 && abs(Ξ”_cur)<0.01 &&break end return Ξ±_opt end struct XuPei{T,S} <: AutoRegMethod reg::T Ξ»::S #shrinking parameter end XuPei(r::AbstractRegularization{T}) where {T} = XuPei(r,0.9) #from http://dx.doi.org/10.1016/j.flowmeasinst.2016.05.004 function auto_reg(t,y,s,kernel,method::XuPei;yoffset=true,tdomain=:realplus) creg = typeof(method.reg).name.wrapper Ξ» =method.Ξ» #define regularization vector if method.reg isa UnionAll Ξ›=ones(length(s)-1)*1e-3 creg = method.reg else Ξ›=ones(length(s)-1)*method.reg.Ξ± creg = typeof(method.reg).name.wrapper end res = Inf for i in 1:100 reg=creg(Ξ›) AR,yr = build_ar(t,y,s,kernel,reg,yoffset) sy= get_yt(AR,yr;tdomain=:real) sy[sy[1:end].<0][1:end-1].=0 res_new= norm((AR*sy)[1:length(y)]-y) reg_new = norm((AR*sy)[length(y)+1:end-1]) if res < res_new break end res=res_new sy=sy[1:end-1] Ξ›[sy.>0] .=Ξ›[sy.>0]*Ξ» end return creg(Ξ›) end
Fredholm
https://github.com/MatFi/Fredholm.jl.git
[ "MIT" ]
0.1.1
7cf8421732ddbae810c2f230613a2b6ca0c928fe
code
909
mutable struct Tikhonov{T} <: AbstractRegularization{T} Ξ±::T end Tikhonov() = Tikhonov get_regularization_matrix(regop::Tikhonov{T},N,ds) where {T} = Matrix(I(N)) mutable struct SecondDerivative{T} <: AbstractRegularization{T} Ξ±::T end SecondDerivative()=SecondDerivative function get_regularization_matrix(regop::SecondDerivative{T},N,ds) where {T} Ξ±=regop.Ξ± L=zeros( N+2,N) for j in 3:N L[j,j]= 1/(ds[j]*(ds[j]+ds[j-1]))# L[j,j-1]= -2*1/(ds[j-1]*(ds[j]))# L[j,j-2]= 1/(ds[j-1]*(ds[j]+ds[j-1]))# end # if regop.lower_bc L[1,1]=1/ds[1] L[2,1]=-2*1/(ds[1]*(ds[2])) L[2,2]=1/ds[2]*(ds[1]+ds[2]) # end # if regop.upper_bc L[N+1,N-1]=1/ds[N-1]*(ds[N]+ds[N-1]) L[N+1,N]=-2*1/((ds[N]*ds[N-1])) L[N+2,N]=1/ds[N] # end return L #L[:,end] .=0 #Do not regularize on the y offset end
Fredholm
https://github.com/MatFi/Fredholm.jl.git
[ "MIT" ]
0.1.1
7cf8421732ddbae810c2f230613a2b6ca0c928fe
code
2097
function rilt(t,y,smin,smax,N,α=missing;kwargs...) s = smin * (smax / smin).^range(0, 1, length=N + 1) invert(t,y,s,(t,s) -> exp(-t*s),Tikhonov(α),kwargs...) end function invert(t,y,s,kernel::Function,aamethod::AutoRegMethod;kwargs...) reg = auto_reg(t,y,s,kernel::Function,aamethod;kwargs...) invert(t,y,s,kernel,reg;kwargs...) end """ invert(s,y,t,k::Function,reg;yoffset=true,tdomain=:realplus) Compute the discretized form a(s) in y(t) = ∫a(s)k(t,s)ds. `s` and `y` represents the data wich we want to invert on discrete points `t`. # Examples ```julia-repl julia> bar([1, 2], [1, 2]) 1 ``` """ function invert(s,y,t,k::Function,reg;yoffset=true,kwargs...) AR,yr = build_ar(s,y,t,k,reg,yoffset) yt=get_yt(AR,yr;kwargs...) reshape return (t,yt[1:length(t),1],s,(AR*yt)[1:length(s),1]) end function get_yt(AR,yr;tdomain=:realplus) if tdomain ==:real sy = (AR \ yr) return sy end return nonneg_lsq(AR,yr;alg=:fnnls) end function build_ar(t,y,s,kernel,reg,yoffset) #TODO: whole function does allocate like hell when AutoRegMethod is present # cacheing must be implemented # central points sc= (s[1:end - 1] + s[2:end]) / 2 N=length(sc) ds = diff(s) dd=ones(length(ds)) L = get_regularization_matrix(reg,N,dd) #Build Matrix approximation of kernel sType = eltype(s) lType = eltype(L) aType =promote_type(sType,lType,eltype(t),eltype(ds)) A = aType[kernel(i, sc[j])*ds[j] for i in t, j in eachindex(sc)] if reg.α isa AbstractArray L= L*(Diagonal(reg.α)) else L= L*(Diagonal(reg.α*ones(aType,length(sc)))) end if yoffset #add a ofset column A= hcat(A,ones(size(A,1))) #Do not regularize the y-offset L= hcat(L,zeros(size(L,1))) # push!(ds, ) push!(sc, 0) end AR = vcat(A, L) # add entry to y to store the regularization yr = vcat(y, zeros(size(L,1 ))) yr=convert.((eltype(AR),),yr) #allows for automatic dfferentation NNLS return (AR,yr) end
Fredholm
https://github.com/MatFi/Fredholm.jl.git
[ "MIT" ]
0.1.1
7cf8421732ddbae810c2f230613a2b6ca0c928fe
code
894
@testset "Fredholm.jl" begin Random.seed!(42) F(t) = exp(-(t - 2)^2 / (2 * 0.3^2)) + exp(-(t - 3)^2 / (2 * 0.3^2)) y(s) = quadgk(t -> F(t) * exp(-t * s), 0, Inf, rtol=1e-6)[1] s = 10.0.^(-2:0.05:1) # generate discrete example data ys = map(y, s) # from this is we want to approximate F(t) noise = (randn(length(ys))) * 0.001 ti = 0:0.05:5 |> collect # define the t-domain for the solution Ξ± = 6e-4 regularizations = [Tikhonov, SecondDerivative] for r in regularizations t, yt = invert(s, ys .+ noise, ti, (t, s) -> exp(-t * s), r(Ξ±)) @test norm(F.(ti) .- yt) < 1.5 t, yt = invert(s, ys .+ noise, ti, (t, s) -> exp(-t * s), LCurve(r(Ξ±))) @test norm(F.(ti) .- yt) < 1.5 t, yt = invert(s, ys .+ noise, ti, (t, s) -> exp(-t * s), XuPei(r(1), 0.95)) @test norm(F.(ti) .- yt) < 1.5 end end
Fredholm
https://github.com/MatFi/Fredholm.jl.git
[ "MIT" ]
0.1.1
7cf8421732ddbae810c2f230613a2b6ca0c928fe
code
97
using Fredholm using Test using QuadGK, Random using LinearAlgebra: norm include("fredholm.jl")
Fredholm
https://github.com/MatFi/Fredholm.jl.git
[ "MIT" ]
0.1.1
7cf8421732ddbae810c2f230613a2b6ca0c928fe
docs
1432
# Fredholm ## Usage As an example, consider input data of the following form ```julia using Fredholm, QuadGK, Random Random.seed!(1234); F(t) = exp(-(t - 2)^2 / (2 * 0.3^2)) + exp(-(t - 3)^2 / (2 * 0.3^2)) y(s) = quadgk(t -> F(t) * exp(-t * s), 0, Inf, rtol=1e-6)[1] s = 10.0.^(-2:0.05:1) # generate discrete example data ys = map(y, s) # from this we want to approximate F(t) noise = (randn(length(ys))) * 0.001 ti = 0:0.01:5|> collect #define the t-domain for the solution Ξ± = 1.2e-4 t, yt, ss, yss= invert(s, ys .+ noise, ti, (t, s) -> exp(-t * s), Tikhonov(Ξ±)) ``` The solution `yt` at discrete `t` will very much depend on the choice of the regularization parameter `Ξ±`. If more noise is present in the data a higher `Ξ±` should be picked and vice versa. The variables `ss` and `yss` contain the regularized from of `s` and `ys`, where `ss[end]` contians the `y-offset`. If `invert` is called with the keyword `yoffset=false` `ss` and `s` will be equal. ![example](example.png) To allow the solution to take also negative amplitudes use the `tdomain = :real` keyword ```julia t, yt = invert(s, ys .+ noise, ti, (t, s) -> exp(-t * s), Tikhonov(Ξ±), tdomain=:real) ``` In cases that the regularization parameter `Ξ±` is not known beforehand one can estimate it via the L-Curve method by calling ```julia t, yt = invert(s, ys .+ noise, ti, (t, s) -> exp(-t * s), LCurve(Tikhonov(Ξ±)), tdomain=:real) ```
Fredholm
https://github.com/MatFi/Fredholm.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
1646
using BenchmarkTools using Clustering, Distances, Random Random.seed!(345678) const SUITE = BenchmarkGroup() SUITE["hclust"] = BenchmarkGroup() function random_distance_matrix(n::Integer, m::Integer=10, dist::PreMetric=Euclidean()) pts = rand(m, n) return pairwise(dist, pts, dims=2) end function hclust_benchmark(n::Integer, m::Integer=10, dist::PreMetric=Euclidean()) res = BenchmarkGroup() for linkage in ("single", "average", "complete") res[linkage] = @benchmarkable hclust(D, linkage=Symbol($linkage)) setup=(D=random_distance_matrix($n, $m, $dist)) end return res end for n in (10, 100, 1000, 10000) SUITE["hclust"][n] = hclust_benchmark(n) end SUITE["cutree"] = BenchmarkGroup() for (n, k) in ((10, 3), (100, 10), (1000, 100), (10000, 1000)) SUITE["cutree"][(n,k)] = @benchmarkable cutree(hclu, k=$k) setup=(D=random_distance_matrix($n, 5); hclu=hclust(D, linkage=:single)) end function silhouette_benchmark(metric, assgns, points, nclusters) res = BenchmarkGroup() res[:distances] = @benchmarkable silhouettes($assgns, pairwise($metric, $points, $points, dims=2)) res[:points] = @benchmarkable silhouettes($assgns, $points; metric=$metric) return res end SUITE["silhouette"] = BenchmarkGroup() for metric in [SqEuclidean(), Euclidean()] SUITE["silhouette"]["metric=$(typeof(metric))"] = metric_bench = BenchmarkGroup() for n in [100, 1000, 10000, 20000] nclusters = 10 dims = 10 points = randn(dims, n) assgns = rand(1:nclusters, n) metric_bench["n=$n"] = silhouette_benchmark(metric, assgns, points, nclusters) end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
630
using BenchmarkTools using Dates, Distances include("benchmarks.jl") BenchmarkTools.DEFAULT_PARAMETERS.seconds = 10.0 # Long enough # Tuning tunefilename = joinpath(@__DIR__, "params.json") if !isfile(tunefilename) tuning = tune!(SUITE; verbose = true); BenchmarkTools.save(tunefilename, params(SUITE)) end loadparams!(SUITE, BenchmarkTools.load(tunefilename)[1], :evals, :samples); # Run and judge results = run(SUITE; verbose = true) # save results to JSON file BenchmarkTools.save(joinpath(@__DIR__, "clustering_benchmark_"*Dates.format(now(), "yyyymmdd-HHMM")*".json"), results) @show results
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
593
using Documenter, Clustering makedocs( source = "source", format = Documenter.HTML(prettyurls=false), sitename = "Clustering.jl", modules = [Clustering], pages = [ "Introduction" => "index.md", "Algorithms" => [ "algorithms.md", "init.md", "kmeans.md", "kmedoids.md", "hclust.md", "mcl.md", "affprop.md", "dbscan.md", "fuzzycmeans.md", ], "validate.md", ], ) deploydocs( repo = "github.com/JuliaStats/Clustering.jl.git", )
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
1478
using Plots, Clustering ## test data with 3 clusters X = hcat([4., 5.] .+ 0.4 * randn(2, 10), [9., -5.] .+ 0.4 * randn(2, 5), [-4., -9.] .+ 1 * randn(2, 5)) ## visualisation of the exemplary data scatter(X[1,:], X[2,:], label = "data points", xlabel = "x", ylabel = "y", legend = :right, ) nclusters = 2:5 ## hard clustering quality clusterings = kmeans.(Ref(X), nclusters) hard_indices = [:silhouettes, :calinski_harabasz, :xie_beni, :davies_bouldin, :dunn] kmeans_quality = Dict( qidx => clustering_quality.(Ref(X), clusterings, quality_index = qidx) for qidx in hard_indices) plot(( plot(nclusters, kmeans_quality[qidx], marker = :circle, title = qidx, label = nothing, ) for qidx in hard_indices)..., layout = (3, 2), xaxis = "N clusters", plot_title = "\"Hard\" clustering quality indices" ) ## soft clustering quality fuzziness = 2 fuzzy_clusterings = fuzzy_cmeans.(Ref(X), nclusters, fuzziness) soft_indices = [:calinski_harabasz, :xie_beni] fuzzy_cmeans_quality = Dict( qidx => clustering_quality.(Ref(X), fuzzy_clusterings, fuzziness = fuzziness, quality_index = qidx) for qidx in soft_indices) plot(( plot(nclusters, fuzzy_cmeans_quality[qidx], marker = :circle, title = qidx, label = nothing, ) for qidx in soft_indices)..., layout = (2, 1), xaxis = "N clusters", plot_title = "\"Soft\" clustering quality indices" )
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
1791
module Clustering using Distances using NearestNeighbors using StatsBase using Printf using LinearAlgebra using SparseArrays using Statistics using Random import Base: show import StatsBase: counts export # reexport from StatsBase sample, sample!, # common ClusteringResult, nclusters, counts, wcounts, assignments, # seeding SeedingAlgorithm, RandSeedAlg, KmppAlg, KmCentralityAlg, copyseeds, copyseeds!, initseeds, initseeds!, initseeds_by_costs, initseeds_by_costs!, kmpp, kmpp_by_costs, # kmeans kmeans, kmeans!, KmeansResult, # kmedoids kmedoids, kmedoids!, KmedoidsResult, # affprop AffinityPropResult, affinityprop, # dbscan DbscanResult, DbscanCluster, dbscan, # fuzzy_cmeans fuzzy_cmeans, FuzzyCMeansResult, # counts counts, # reexport StatsBase.counts # silhouette silhouettes, # quality indices clustering_quality, # varinfo varinfo, # randindex randindex, # V-measure vmeasure, # mutualinfo mutualinfo, # hclust Hclust, hclust, cutree, # MCL mcl, MCLResult, # pair confusion matrix confusion ## source files include("utils.jl") include("seeding.jl") include("kmeans.jl") include("kmedoids.jl") include("affprop.jl") include("dbscan.jl") include("mcl.jl") include("fuzzycmeans.jl") include("counts.jl") include("cluster_distances.jl") include("silhouette.jl") include("clustering_quality.jl") include("randindex.jl") include("varinfo.jl") include("vmeasure.jl") include("mutualinfo.jl") include("confusion.jl") include("hclust.jl") include("deprecate.jl") end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
7714
# Affinity propagation # # Reference: # Clustering by Passing Messages Between Data Points. # Brendan J. Frey and Delbert Dueck # Science, vol 315, pages 972-976, 2007. # #### Interface """ AffinityPropResult <: ClusteringResult The output of affinity propagation clustering ([`affinityprop`](@ref)). # Fields * `exemplars::Vector{Int}`: indices of *exemplars* (cluster centers) * `assignments::Vector{Int}`: cluster assignments for each data point * `iterations::Int`: number of iterations executed * `converged::Bool`: converged or not """ mutable struct AffinityPropResult <: ClusteringResult exemplars::Vector{Int} # indexes of exemplars (centers) assignments::Vector{Int} # assignments for each point counts::Vector{Int} # number of data points in each cluster iterations::Int # number of iterations executed converged::Bool # converged or not end const _afp_default_maxiter = 200 const _afp_default_damp = 0.5 const _afp_default_tol = 1.0e-6 const _afp_default_display = :none """ affinityprop(S::AbstractMatrix; [maxiter=200], [tol=1e-6], [damp=0.5], [display=:none]) -> AffinityPropResult Perform affinity propagation clustering based on a similarity matrix `S`. ``S_{ij}`` (``i β‰  j``) is the similarity (or the negated distance) between the ``i``-th and ``j``-th points, ``S_{ii}`` defines the *availability* of the ``i``-th point as an *exemplar*. # Arguments - `damp::Real`: the dampening coefficient, ``0 ≀ \\mathrm{damp} < 1``. Larger values indicate slower (and probably more stable) update. ``\\mathrm{damp} = 0`` disables dampening. - `maxiter`, `tol`, `display`: see [common options](@ref common_options) # References > Brendan J. Frey and Delbert Dueck. *Clustering by Passing Messages > Between Data Points.* Science, vol 315, pages 972-976, 2007. """ function affinityprop(S::AbstractMatrix{T}; maxiter::Integer=_afp_default_maxiter, tol::Real=_afp_default_tol, damp::Real=_afp_default_damp, display::Symbol=_afp_default_display) where T<:AbstractFloat # check arguments n = size(S, 1) size(S, 2) == n || throw(ArgumentError("S must be a square matrix ($(size(S)) given).")) n >= 2 || throw(ArgumentError("At least two data points are required ($n given).")) tol > 0 || throw(ArgumentError("tol must be a positive value ($tol given).")) 0 <= damp < 1 || throw(ArgumentError("damp must be a non-negative real value below 1 ($damp given).")) # invoke core implementation _affinityprop(S, round(Int, maxiter), tol, convert(T, damp), display_level(display)) end #### Implementation function _affinityprop(S::AbstractMatrix{T}, maxiter::Int, tol::Real, damp::T, displevel::Int) where T<:AbstractFloat n = size(S, 1) n2 = n * n # initialize messages R = zeros(T, n, n) # responsibilities A = zeros(T, n, n) # availabilities # prepare storages Rt = Matrix{T}(undef, n, n) At = Matrix{T}(undef, n, n) if displevel >= 2 @printf "%7s %12s | %8s \n" "Iters" "objv-change" "exemplars" println("-----------------------------------------------------") end t = 0 converged = false while !converged && t < maxiter t += 1 # compute new messages _afp_compute_r!(Rt, S, A) _afp_dampen_update!(R, Rt, damp) _afp_compute_a!(At, R) _afp_dampen_update!(A, At, damp) # determine convergence ch = max(Linfdist(A, At), Linfdist(R, Rt)) / (one(T) - damp) converged = (ch < tol) if displevel >= 2 # count the number of exemplars ne = _afp_count_exemplars(A, R) @printf("%7d %12.4e | %8d\n", t, ch, ne) end end # extract exemplars and assignments exemplars = _afp_extract_exemplars(A, R) if isempty(exemplars) @show A R end @assert !isempty(exemplars) assignments, counts = _afp_get_assignments(S, exemplars) if displevel >= 1 if converged @info "Affinity propagation converged with $t iterations: $(length(exemplars)) exemplars." else @warn "Affinity propagation terminated without convergence after $t iterations: $(length(exemplars)) exemplars." end end # produce output struct return AffinityPropResult(exemplars, assignments, counts, t, converged) end # compute responsibilities function _afp_compute_r!(R::Matrix{T}, S::AbstractMatrix{T}, A::Matrix{T}) where T n = size(S, 1) I1 = Vector{Int}(undef, n) # I1[i] is the column index of the maximum element in (A+S)[i,:] Y1 = Vector{T}(undef, n) # Y1[i] is the maximum element in (A+S)[i,:] Y2 = Vector{T}(undef, n) # Y2[i] is the second maximum element in (A+S)[i,:] # Find the first and second maximum elements along each row @inbounds for i = 1:n v1 = A[i,1] + S[i,1] v2 = A[i,2] + S[i,2] if v1 > v2 I1[i] = 1 Y1[i] = v1 Y2[i] = v2 else I1[i] = 2 Y1[i] = v2 Y2[i] = v1 end end @inbounds for j = 3:n, i = 1:n v = A[i,j] + S[i,j] if v > Y2[i] if v > Y1[i] Y2[i] = Y1[i] I1[i] = j Y1[i] = v else Y2[i] = v end end end # compute R values @inbounds for j = 1:n, i = 1:n mv = (j == I1[i] ? Y2[i] : Y1[i]) R[i,j] = S[i,j] - mv end return R end # compute availabilities function _afp_compute_a!(A::Matrix{T}, R::Matrix{T}) where T n = size(R, 1) z = zero(T) for j = 1:n @inbounds rjj = R[j,j] # compute s <- sum_{i \ne j} max(0, R(i,j)) s = z for i = 1:n if i != j @inbounds r = R[i,j] if r > z s += r end end end for i = 1:n if i == j @inbounds A[i,j] = s else @inbounds r = R[i,j] u = rjj + s if r > z u -= r end A[i,j] = ifelse(u < z, u, z) end end end return A end # dampen update function _afp_dampen_update!(x::Array{T}, xt::Array{T}, damp::T) where T ct = one(T) - damp for i = 1:length(x) @inbounds x[i] = ct * xt[i] + damp * x[i] end return x end # count the number of exemplars function _afp_count_exemplars(A::Matrix, R::Matrix) n = size(A,1) c = 0 for i = 1:n @inbounds if A[i,i] + R[i,i] > 0 c += 1 end end return c end # extract all exemplars function _afp_extract_exemplars(A::Matrix, R::Matrix) n = size(A,1) r = Int[] for i = 1:n @inbounds if A[i,i] + R[i,i] > 0 push!(r, i) end end return r end # get assignments function _afp_get_assignments(S::AbstractMatrix, exemplars::Vector{Int}) n = size(S, 1) k = length(exemplars) Se = S[:, exemplars] a = Vector{Int}(undef, n) for i = 1:n p = 1 v = Se[i,1] for j = 2:k s = Se[i,j] if s > v v = s p = j end end a[i] = p end a[exemplars] = eachindex(exemplars) cnts = zeros(Int, k) for aa in a @inbounds cnts[aa] += 1 end return (a, cnts) end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
10100
#=== Base type for efficient computation of average(mean) distances from the cluster centers to a given point. The descendant types should implement the following methods: * `update!(dists, assignments, points)`: update the internal state of `dists` with point coordinates and their assignments to the clusters * `sumdistances(dists, points, indices)`: compute the sum of distances from all `dists` clusters to `points` ===# abstract type ClusterDistances{T} end # create empty ClusterDistances object for a given metric # and update it with a given clustering # if batch_size is specified, the updates are done in point batches of given size function ClusterDistances(metric::SemiMetric, assignments::AbstractVector{<:Integer}, points::AbstractMatrix{<:Real}, batch_size::Union{Integer, Nothing} = nothing) update!(ClusterDistances(eltype(points), metric, length(assignments), size(points, 1), maximum(assignments)), assignments, points, batch_size) end ClusterDistances(metric, R::ClusteringResult, args...) = ClusterDistances(metric, assignments(R), args...) # fallback implementations of ClusteringDistances methods cluster_sizes(dists::ClusterDistances) = dists.cluster_sizes nclusters(dists::ClusterDistances) = length(cluster_sizes(dists)) update!(dists::ClusterDistances, assignments::AbstractVector, points::AbstractMatrix) = error("update!(dists::$(typeof(dists))) not implemented") sumdistances(dists::ClusterDistances, points::Union{AbstractMatrix, Nothing}, indices::Union{AbstractVector{<:Integer}, Nothing}) = error("sumdistances(dists::$(typeof(dists))) not implemented") # average distances from each cluster to each point, nclustersΓ—n matrix function meandistances(dists::ClusterDistances, assignments::AbstractVector{<:Integer}, points::Union{AbstractMatrix, Nothing}, indices::AbstractVector{<:Integer}) @assert length(assignments) == length(indices) (points === nothing) || @assert(size(points, 2) == length(indices)) clu_to_pt = sumdistances(dists, points, indices) clu_sizes = cluster_sizes(dists) @assert length(assignments) == length(indices) @assert size(clu_to_pt) == (length(clu_sizes), length(assignments)) # normalize distances by cluster sizes @inbounds for j in eachindex(assignments) for (i, c) in enumerate(clu_sizes) if i == assignments[j] c -= 1 end if c == 0 clu_to_pt[i,j] = 0 else clu_to_pt[i,j] /= c end end end return clu_to_pt end # wrapper for ClusteringResult update!(dists::ClusterDistances, R::ClusteringResult, args...) = update!(dists, assignments(R), args...) # batch-update silhouette dists (splitting the points into chunks of batch_size size) function update!(dists::ClusterDistances, assignments::AbstractVector{<:Integer}, points::AbstractMatrix{<:Real}, batch_size::Union{Integer, Nothing}) n = size(points, 2) ((batch_size === nothing) || (n <= batch_size)) && return update!(dists, assignments, points) for batch_start in 1:batch_size:n batch_ixs = batch_start:min(batch_start + batch_size - 1, n) update!(dists, view(assignments, batch_ixs), view(points, :, batch_ixs)) end return dists end # generic ClusterDistances implementation for an arbitrary metric M # if M is Nothing, point_dists is an arbitrary matrix of point distances struct SimpleClusterDistances{M, T} <: ClusterDistances{T} metric::M cluster_sizes::Vector{Int} assignments::Vector{Int} point_dists::Matrix{T} SimpleClusterDistances(::Type{T}, metric::M, npoints::Integer, nclusters::Integer) where {M<:Union{SemiMetric, Nothing}, T<:Real} = new{M, T}(metric, zeros(Int, nclusters), Vector{Int}(), Matrix{T}(undef, npoints, npoints)) # reuse given points matrix function SimpleClusterDistances( metric::Nothing, assignments::AbstractVector{<:Integer}, point_dists::AbstractMatrix{T} ) where T<:Real n = length(assignments) size(point_dists) == (n, n) || throw(DimensionMismatch("assignments length ($n) does not match distances matrix size ($(size(point_dists)))")) issymmetric(point_dists) || throw(ArgumentError("point distances matrix must be symmetric")) clu_sizes = zeros(Int, maximum(assignments)) @inbounds for cluster in assignments clu_sizes[cluster] += 1 end new{Nothing, T}(metric, clu_sizes, assignments, point_dists) end end # fallback ClusterDistances constructor ClusterDistances(::Type{T}, metric::Union{SemiMetric, Nothing}, npoints::Union{Integer, Nothing}, dims::Integer, nclusters::Integer) where T<:Real = SimpleClusterDistances(T, metric, npoints, nclusters) # when metric is nothing, points is the matrix of distances function ClusterDistances(metric::Nothing, assignments::AbstractVector{<:Integer}, points::AbstractMatrix, batch_size::Union{Integer, Nothing} = nothing) (batch_size === nothing) || (size(points, 2) > batch_size) || error("batch-updates of distance matrix-based ClusterDistances not supported") SimpleClusterDistances(metric, assignments, points) end function update!(dists::SimpleClusterDistances{M}, assignments::AbstractVector{<:Integer}, points::AbstractMatrix{<:Real}) where M @assert length(assignments) == size(points, 2) check_assignments(assignments, nclusters(dists)) append!(dists.assignments, assignments) n = size(dists.point_dists, 1) length(dists.assignments) == n || error("$(typeof(dists)) does not support batch updates: $(length(assignments)) points given, $n expected") @inbounds for cluster in assignments dists.cluster_sizes[cluster] += 1 end if M === Nothing size(point_dists) == (n, n) || throw(DimensionMismatch("points should be a point-to-point distances matrix of ($n, $n) size, $(size(points)) given")) copy!(dists.point_dists, point_dists) else # metric-based SimpleClusterDistances does not support batched updates size(points, 2) == n || throw(DimensionMismatch("points should be a point coordinates matrix with $n columns, $(size(points, 2)) found")) pairwise!(dists.metric, dists.point_dists, points, dims=2) end return dists end # this function returns matrix r nclusters x n, such that # r[i, j] is the sum of distances from all i-th cluster points to the point indices[j] function sumdistances(dists::SimpleClusterDistances, points::Union{AbstractMatrix, Nothing}, # unused as distances are already in point_dists indices::AbstractVector{<:Integer}) T = eltype(dists.point_dists) n = length(dists.assignments) S = typeof((one(T)+one(T))/2) r = zeros(S, nclusters(dists), n) @inbounds for (jj, j) in enumerate(indices) for i = 1:j-1 r[dists.assignments[i], jj] += dists.point_dists[i,j] end for i = j+1:n r[dists.assignments[i], jj] += dists.point_dists[i,j] end end return r end # uses the method from "Distributed Silhouette Algorithm: Evaluating Clustering on Big Data" # https://arxiv.org/abs/2303.14102 # for SqEuclidean point distances struct SqEuclideanClusterDistances{T} <: ClusterDistances{T} cluster_sizes::Vector{Int} # [nclusters] Y::Matrix{T} # [dims, nclusters], the first moments of each cluster (sum of point coords) Ξ¨::Vector{T} # [nclusters], the second moments of each cluster (sum of point coord squares) SqEuclideanClusterDistances(::Type{T}, npoints::Union{Integer, Nothing}, dims::Integer, nclusters::Integer) where T<:Real = new{T}(zeros(Int, nclusters), zeros(T, dims, nclusters), zeros(T, nclusters)) end ClusterDistances(::Type{T}, metric::SqEuclidean, npoints::Union{Integer, Nothing}, dims::Integer, nclusters::Integer) where T<:Real = SqEuclideanClusterDistances(T, npoints, dims, nclusters) function update!(dists::SqEuclideanClusterDistances, assignments::AbstractVector{<:Integer}, points::AbstractMatrix{<:Real}) # x dims are [D,N] d, n = size(points) k = length(cluster_sizes(dists)) check_assignments(assignments, k) n == length(assignments) || throw(DimensionMismatch("points count ($n) does not match assignments length $(length(assignments)))")) d == size(dists.Y, 1) || throw(DimensionMismatch("points dims ($(size(points, 1))) do no must match ClusterDistances dims ($(size(dists.Y, 1)))")) # precompute moments and update counts @inbounds for (i, cluster) in enumerate(assignments) point = view(points, :, i) # switch to eachslice() once Julia-1.0 support is dropped dists.cluster_sizes[cluster] += 1 dists.Y[:, cluster] .+= point dists.Ξ¨[cluster] += sum(abs2, point) end return dists end # sum distances from each cluster to each point in `points`, [nclusters, n] function sumdistances(dists::SqEuclideanClusterDistances, points::AbstractMatrix, indices::AbstractVector{<:Integer}) @assert size(points, 2) == length(indices) point_norms = sum(abs2, points; dims=1) # [1,n] return dists.cluster_sizes .* point_norms .+ reshape(dists.Ξ¨, nclusters(dists), 1) .- 2 * (transpose(dists.Y) * points) end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
13614
# main method for hard clustering indices + docs """ For "hard" clustering: clustering_quality(data, centers, assignments; quality_index, [metric]) clustering_quality(data, clustering; quality_index, [metric]) For fuzzy ("soft") clustering: clustering_quality(data, centers, weights; quality_index, fuzziness, [metric]) clustering_quality(data, clustering; quality_index, fuzziness, [metric]) For "hard" clustering without specifying cluster centers: clustering_quality(data, assignments; quality_index, [metric]) clustering_quality(data, clustering; quality_index, [metric]) For "hard" clustering without specifying data points and cluster centers: clustering_quality(assignments, dist_matrix; quality_index) clustering_quality(clustering, dist_matrix; quality_index) Compute the *quality index* for a given clustering. Returns a quality index (real value). ## Arguments - `data::AbstractMatrix`: ``dΓ—n`` data matrix with each column representing one ``d``-dimensional data point - `centers::AbstractMatrix`: ``dΓ—k`` matrix with cluster centers represented as columns - `assignments::AbstractVector{Int}`: ``n`` vector of point assignments (cluster indices) - `weights::AbstractMatrix`: ``nΓ—k`` matrix with fuzzy clustering weights, `weights[i,j]` is the degree of membership of ``i``-th data point to ``j``-th cluster - `clustering::Union{ClusteringResult, FuzzyCMeansResult}`: the output of the clustering method - `quality_index::Symbol`: quality index to calculate; see below for the supported options - `dist_matrix::AbstractMatrix`: a ``nΓ—n`` pairwise distance matrix; `dist_matrix[i,j]` is the distance between ``i``-th and ``j``-th points ## Keyword arguments - `quality_index::Symbol`: clustering *quality index* to calculate; see below for the supported options - `fuzziness::Real`: clustering *fuzziness* > 1 - `metric::SemiMetric=SqEuclidean()`: `SemiMetric` object that defines the metric/distance/similarity function When calling `clustering_quality`, one can explicitly specify `centers`, `assignments`, and `weights`, or provide `ClusteringResult` via `clustering`, from which the necessary data will be read automatically. For clustering without known cluster centers the `data` points are not required. `dist_matrix` could be provided explicitly, otherwise it would be calculated from the `data` points using the specified `metric`. ## Supported quality indices - `:calinski_harabasz`: hard or fuzzy Calinski-Harabsz index (↑), the corrected ratio of between cluster centers inertia and within-clusters inertia - `:xie_beni`: hard or fuzzy Xie-Beni index (↓), the ratio betwen inertia within clusters and minimal distance between the cluster centers - `:davies_bouldin`: Davies-Bouldin index (↓), the similarity between the cluster and the other most similar one, averaged over all clusters - `:dunn`: Dunn index (↑), the ratio of the minimal distance between clusters and the maximal cluster diameter - `:silhouettes`: the average silhouette index (↑), see [`silhouettes`](@ref) The arrows ↑ or ↓ specify the direction of the incresing clustering quality. Please refer to the [documentation](@ref clustering_quality) for more details on the clustering quality indices. """ function clustering_quality( data::AbstractMatrix{<:Real}, # dΓ—n matrix centers::AbstractMatrix{<:Real}, # dΓ—k matrix assignments::AbstractVector{<:Integer}; # n vector quality_index::Symbol, metric::SemiMetric=SqEuclidean() ) d, n = size(data) dc, k = size(centers) d == dc || throw(DimensionMismatch("Inconsistent array dimensions for `data` and `centers`.")) (1 <= k <= n) || throw(ArgumentError("Number of clusters k must be from 1:n (n=$n), k=$k given.")) k >= 2 || throw(ArgumentError("Quality index not defined for the degenerated clustering with a single cluster.")) n == k && throw(ArgumentError("Quality index not defined for the degenerated clustering where each data point is its own cluster.")) seen_clusters = falses(k) for (i, clu) in enumerate(assignments) (clu in axes(centers, 2)) || throw(ArgumentError("Invalid cluster index: assignments[$i]=$(clu).")) seen_clusters[clu] = true end if !all(seen_clusters) empty_clu_ixs = findall(!, seen_clusters) @warn "Detected empty cluster(s): $(join(string.("#", empty_clu_ixs), ", ")). clustering_quality() results might be incorrect." newClusterIndices = cumsum(seen_clusters) centers = view(centers, :, seen_clusters) assignments = newClusterIndices[assignments] end if quality_index == :calinski_harabasz _cluquality_calinski_harabasz(metric, data, centers, assignments, nothing) elseif quality_index == :xie_beni _cluquality_xie_beni(metric, data, centers, assignments, nothing) elseif quality_index == :davies_bouldin _cluquality_davies_bouldin(metric, data, centers, assignments) elseif quality_index == :silhouettes mean(silhouettes(assignments, pairwise(metric, data, dims=2))) elseif quality_index == :dunn _cluquality_dunn(assignments, pairwise(metric, data, dims=2)) else throw(ArgumentError("quality_index=:$quality_index not supported.")) end end clustering_quality(data::AbstractMatrix{<:Real}, R::ClusteringResult; quality_index::Symbol, metric::SemiMetric=SqEuclidean()) = clustering_quality(data, R.centers, R.assignments; quality_index = quality_index, metric = metric) # main method for fuzzy clustering indices function clustering_quality( data::AbstractMatrix{<:Real}, # dΓ—n matrix centers::AbstractMatrix{<:Real}, # dΓ—k matrix weights::AbstractMatrix{<:Real}; # nΓ—k matrix quality_index::Symbol, fuzziness::Real, metric::SemiMetric=SqEuclidean() ) d, n = size(data) dc, k = size(centers) nw, kw = size(weights) d == dc || throw(DimensionMismatch("Inconsistent array dimensions for `data` and `centers`.")) n == nw || throw(DimensionMismatch("Inconsistent data length for `data` and `weights`.")) k == kw || throw(DimensionMismatch("Inconsistent number of clusters for `centers` and `weights`.")) (1 <= k <= n) || throw(ArgumentError("Number of clusters k must be from 1:n (n=$n), k=$k given.")) k >= 2 || throw(ArgumentError("Quality index not defined for the degenerated clustering with a single cluster.")) n == k && throw(ArgumentError("Quality index not defined for the degenerated clustering where each data point is its own cluster.")) all(>=(0), weights) || throw(ArgumentError("All weights must be larger or equal 0.")) 1 < fuzziness || throw(ArgumentError("Fuzziness must be greater than 1 ($fuzziness given)")) if quality_index == :calinski_harabasz _cluquality_calinski_harabasz(metric, data, centers, weights, fuzziness) elseif quality_index == :xie_beni _cluquality_xie_beni(metric, data, centers, weights, fuzziness) elseif quality_index in [:davies_bouldin, :silhouettes, :dunn] throw(ArgumentError("quality_index=:$quality_index does not support fuzzy clusterings.")) else throw(ArgumentError("quality_index=:$quality_index not supported.")) end end clustering_quality(data::AbstractMatrix{<:Real}, R::FuzzyCMeansResult; quality_index::Symbol, fuzziness::Real, metric::SemiMetric=SqEuclidean()) = clustering_quality(data, R.centers, R.weights; quality_index = quality_index, fuzziness = fuzziness, metric = metric) # main method for clustering indices when cluster centres not known function clustering_quality( assignments::AbstractVector{<:Integer}, # n vector dist::AbstractMatrix{<:Real}; # nΓ—n matrix quality_index::Symbol ) n, m = size(dist) na = length(assignments) n == m || throw(ArgumentError("Distance matrix must be square.")) n == na || throw(DimensionMismatch("Inconsistent array dimensions for distance matrix and assignments.")) if quality_index == :silhouettes mean(silhouettes(assignments, dist)) elseif quality_index == :dunn _cluquality_dunn(assignments, dist) elseif quality_index ∈ [:calinski_harabasz, :xie_beni, :davies_bouldin] throw(ArgumentError("quality_index=:$quality_index requires cluster centers.")) else throw(ArgumentError("quality_index=:$quality_index not supported.")) end end clustering_quality(data::AbstractMatrix{<:Real}, assignments::AbstractVector{<:Integer}; quality_index::Symbol, metric::SemiMetric=SqEuclidean()) = clustering_quality(assignments, pairwise(metric, data, dims=2); quality_index = quality_index) clustering_quality(R::ClusteringResult, dist::AbstractMatrix{<:Real}; quality_index::Symbol) = clustering_quality(R.assignments, dist; quality_index = quality_index) # utility functions # convert assignments into a vector of vectors of data point indices for each cluster function _gather_samples(assignments, k) cluster_samples = [Int[] for _ in 1:k] for (i, a) in zip(eachindex(assignments), assignments) push!(cluster_samples[a], i) end return cluster_samples end # shared between hard clustering calinski_harabasz and xie_beni function _inner_inertia( metric::SemiMetric, data::AbstractMatrix, centers::AbstractMatrix, assignments::AbstractVector{<:Integer}, fuzziness::Nothing ) inner_inertia = sum( sum(colwise(metric, view(data, :, samples), center)) for (center, samples) in zip((view(centers, :, j) for j in axes(centers, 2)), _gather_samples(assignments, size(centers, 2))) ) return inner_inertia end # shared between fuzzy clustering calinski_harabasz and xie_beni (fuzzy version) function _inner_inertia( metric::SemiMetric, data::AbstractMatrix, centers::AbstractMatrix, weights::AbstractMatrix, fuzziness::Real ) data_to_center_dists = pairwise(metric, data, centers, dims=2) inner_inertia = sum( w^fuzziness * d for (w, d) in zip(weights, data_to_center_dists) ) return inner_inertia end # hard outer inertia for calinski_harabasz function _outer_inertia( metric::SemiMetric, data::AbstractMatrix, centers::AbstractMatrix, assignments::AbstractVector{<:Integer}, fuzziness::Nothing ) global_center = vec(mean(data, dims=2)) center_distances = colwise(metric, centers, global_center) return sum(center_distances[clu] for clu in assignments) end # fuzzy outer inertia for calinski_harabasz function _outer_inertia( metric::SemiMetric, data::AbstractMatrix, centers::AbstractMatrix, weights::AbstractMatrix, fuzziness::Real ) global_center = vec(mean(data, dims=2)) center_distances = colwise(metric, centers, global_center) return sum(sum(w^fuzziness for w in view(weights, :, clu)) * d for (clu, d) in enumerate(center_distances)) end # Calinsk-Harabasz index function _cluquality_calinski_harabasz( metric::SemiMetric, data::AbstractMatrix{<:Real}, centers::AbstractMatrix{<:Real}, assignments::Union{AbstractVector{<:Integer}, AbstractMatrix{<:Real}}, fuzziness::Union{Real, Nothing} ) n, k = size(data, 2), size(centers, 2) outer_inertia = _outer_inertia(metric, data, centers, assignments, fuzziness) inner_inertia = _inner_inertia(metric, data, centers, assignments, fuzziness) return (outer_inertia / inner_inertia) * (n - k) / (k - 1) end # Davies Bouldin index function _cluquality_davies_bouldin( metric::SemiMetric, data::AbstractMatrix{<:Real}, centers::AbstractMatrix{<:Real}, assignments::AbstractVector{<:Integer}, ) clu_idx = axes(centers, 2) clu_samples = _gather_samples(assignments, length(clu_idx)) clu_diams = [mean(colwise(metric, view(data, :, samples), view(centers, :, clu))) for (clu, samples) in zip(clu_idx, clu_samples)] center_dists = pairwise(metric, centers, dims=2) DB = mean( maximum(@inbounds (clu_diams[j₁] + clu_diams[jβ‚‚]) / center_dists[j₁, jβ‚‚] for jβ‚‚ in clu_idx if jβ‚‚ β‰  j₁) for j₁ in clu_idx) return DB end # Xie-Beni index function _cluquality_xie_beni( metric::SemiMetric, data::AbstractMatrix{<:Real}, centers::AbstractMatrix{<:Real}, assignments::Union{AbstractVector{<:Integer}, AbstractMatrix{<:Real}}, fuzziness::Union{Real, Nothing} ) n, k = size(data, 2), size(centers, 2) inner_intertia = _inner_inertia(metric, data, centers, assignments, fuzziness) center_distances = pairwise(metric, centers, dims=2) min_center_distance = minimum(center_distances[j₁,jβ‚‚] for j₁ in 1:k for jβ‚‚ in j₁+1:k) return inner_intertia / (n * min_center_distance) end # Dunn index function _cluquality_dunn(assignments::AbstractVector{<:Integer}, dist::AbstractMatrix{<:Real}) max_inner_distance, min_outer_distance = typemin(eltype(dist)), typemax(eltype(dist)) for i in eachindex(assignments), j in (i + 1):lastindex(assignments) @inbounds d = dist[i, j] if assignments[i] == assignments[j] if max_inner_distance < d max_inner_distance = d end else if min_outer_distance > d min_outer_distance = d end end end return min_outer_distance / max_inner_distance end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
1911
""" confusion([T = Int], a::Union{ClusteringResult, AbstractVector}, b::Union{ClusteringResult, AbstractVector}) -> Matrix{T} Calculate the *confusion matrix* of the two clusterings. Returns the 2Γ—2 confusion matrix `C` of type `T` (`Int` by default) that represents partition co-occurrence or similarity matrix between two clusterings `a` and `b` by considering all pairs of samples and counting pairs that are assigned into the same or into different clusters. Considering a pair of samples that is in the same group as a **positive pair**, and a pair is in the different group as a **negative pair**, then the count of true positives is `C₁₁`, false negatives is `C₁₂`, false positives `C₂₁`, and true negatives is `Cβ‚‚β‚‚`: | | Positive | Negative | |:--:|:-:|:-:| |Positive|C₁₁|C₁₂| |Negative|C₂₁|Cβ‚‚β‚‚| ## See also [`counts(a::ClusteringResult, a::ClusteringResult)`](@ref counts) for full *contingency matrix*. """ function confusion(::Type{T}, a::AbstractVector{<:Integer}, b::AbstractVector{<:Integer}) where T<:Union{Integer, AbstractFloat} cc = counts(a, b) c = eltype(cc) === T ? cc : convert(Matrix{T}, cc) n = sum(c) nis = sum(abs2, sum!(zeros(T, (size(c, 1), 1)), c)) (nis < 0) && OverflowError("sum of squares of sums of rows overflowed") njs = sum(abs2, sum!(zeros(T, (1, size(c, 2))), c)) (njs < 0) && OverflowError("sum of squares of sums of columns overflowed") t2 = sum(abs2, c) (t2 < 0) && OverflowError("sum of squares of matrix elements overflowed") t3 = nis + njs C = [(t2 - n)Γ·2 (nis - t2)Γ·2; (njs - t2)Γ·2 (t2 + n^2 - t3)Γ·2] return C end confusion(T, a::ClusteringResultOrAssignments, b::ClusteringResultOrAssignments) = confusion(T, assignments(a), assignments(b)) confusion(a::ClusteringResultOrAssignments, b::ClusteringResultOrAssignments) = confusion(Int, a, b)
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
1813
# wrapper for StatsBase.counts(a::Vector, b::Vector, (1:maxA, 1:maxB)) function _counts(a::AbstractVector{<:Integer}, b::AbstractVector{<:Integer}) n = length(a) n == length(b) || throw(DimensionMismatch("Assignment vectors have different lengths ($n and $(length(b)))")) # NOTE: StatsBase.counts() throws ArgumentError for empty vectors (n == 0) && return Matrix{Int}(undef, 0, 0) minA, maxA = extrema(a) minB, maxB = extrema(b) (minA > 0 && minB > 0) || throw(ArgumentError("Cluster indices should be positive integers")) # note: ignoring minA/minB, always start from 1 to match # cluster indices and counts matrix positions return counts(a, b, (1:maxA, 1:maxB)) end """ counts(a::ClusteringResult, b::ClusteringResult) -> Matrix{Int} counts(a::ClusteringResult, b::AbstractVector{<:Integer}) -> Matrix{Int} counts(a::AbstractVector{<:Integer}, b::ClusteringResult) -> Matrix{Int} Calculate the *cross tabulation* (aka *contingency matrix*) for the two clusterings of the same data points. Returns the ``n_a Γ— n_b`` matrix `C`, where ``n_a`` and ``n_b`` are the numbers of clusters in `a` and `b`, respectively, and `C[i, j]` is the size of the intersection of `i`-th cluster from `a` and `j`-th cluster from `b`. The clusterings could be specified either as [`ClusteringResult`](@ref) instances or as vectors of data point assignments. ## See also [`confusion(a::ClusteringResult, a::ClusteringResult)`](@ref confusion) for 2Γ—2 *confusion matrix*. """ counts(a::ClusteringResult, b::ClusteringResult) = _counts(assignments(a), assignments(b)) counts(a::AbstractVector{<:Integer}, b::ClusteringResult) = _counts(a, assignments(b)) counts(a::ClusteringResult, b::AbstractVector{<:Integer}) = _counts(assignments(a), b)
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
7693
# DBSCAN Clustering # """ DbscanCluster DBSCAN cluster, part of [`DbscanResult`](@ref) returned by [`dbscan`](@ref) function. ## Fields - `size::Int`: number of points in a cluster (core + boundary) - `core_indices::Vector{Int}`: indices of points in the cluster *core*, a.k.a. *seeds* (have at least `min_neighbors` neighbors in the cluster) - `boundary_indices::Vector{Int}`: indices of the cluster points outside of *core* """ struct DbscanCluster size::Int core_indices::Vector{Int} boundary_indices::Vector{Int} end """ DbscanResult <: ClusteringResult The output of [`dbscan`](@ref) function. ## Fields - `clusters::Vector{DbscanCluster}`: clusters, length *K* - `seeds::Vector{Int}`: indices of the first points of each cluster's *core*, length *K* - `counts::Vector{Int}`: cluster sizes (number of assigned points), length *K* - `assignments::Vector{Int}`: vector of clusters indices, where each point was assigned to, length *N* """ struct DbscanResult <: ClusteringResult clusters::Vector{DbscanCluster} seeds::Vector{Int} counts::Vector{Int} assignments::Vector{Int} function DbscanResult(clusters::AbstractVector{DbscanCluster}, num_points::Integer) assignments = zeros(Int, num_points) for (i, clu) in enumerate(clusters) assignments[clu.core_indices] .= i assignments[clu.boundary_indices] .= i end new(clusters, [c.core_indices[1] for c in clusters], [c.size for c in clusters], assignments) end end """ dbscan(points::AbstractMatrix, radius::Real; [metric=Euclidean()], [min_neighbors=1], [min_cluster_size=1], [nntree_kwargs...]) -> DbscanResult Cluster `points` using the DBSCAN (Density-Based Spatial Clustering of Applications with Noise) algorithm. ## Arguments - `points`: when `metric` is specified, the *dΓ—n* matrix, where each column is a *d*-dimensional coordinate of a point; when `metric=nothing`, the *nΓ—n* matrix of pairwise distances between the points - `radius::Real`: neighborhood radius; points within this distance are considered neighbors Optional keyword arguments to control the algorithm: - `metric` (defaults to `Euclidean()`): the points distance metric to use, `nothing` means `points` is the *nΓ—n* precalculated distance matrix - `min_neighbors::Integer` (defaults to 1): the minimal number of neighbors required to assign a point to a cluster "core" - `min_cluster_size::Integer` (defaults to 1): the minimal number of points in a cluster; cluster candidates with fewer points are discarded - `nntree_kwargs...`: parameters (like `leafsize`) for the `KDTree` constructor ## Example ```julia points = randn(3, 10000) # DBSCAN clustering, clusters with less than 20 points will be discarded: clustering = dbscan(points, 0.05, min_neighbors = 3, min_cluster_size = 20) ``` ## References: * Martin Ester, Hans-Peter Kriegel, JΓΆrg Sander, and Xiaowei Xu, *"A density-based algorithm for discovering clusters in large spatial databases with noise"*, KDD-1996, pp. 226--231. * Erich Schubert, JΓΆrg Sander, Martin Ester, Hans Peter Kriegel, and Xiaowei Xu, *"DBSCAN Revisited, Revisited: Why and How You Should (Still) Use DBSCAN"*, ACM Transactions on Database Systems, Vol.42(3)3, pp. 1--21, https://doi.org/10.1145/3068335 """ function dbscan(points::AbstractMatrix, radius::Real; metric = Euclidean(), min_neighbors::Integer = 1, min_cluster_size::Integer = 1, nntree_kwargs...) 0 <= radius || throw(ArgumentError("radius $radius must be β‰₯ 0")) if metric !== nothing # points are point coordinates dim, num_points = size(points) num_points <= dim && throw(ArgumentError("points has $dim rows and $num_points columns. Must be a D x N matric with D < N")) kdtree = KDTree(points, metric; nntree_kwargs...) data = (kdtree, points) else # points is a distance matrix num_points = size(points, 1) size(points, 2) == num_points || throw(ArgumentError("When metric=nothing, points must be a square distance matrix ($(size(points)) given).")) num_points >= 2 || throw(ArgumentError("At least two data points are required ($num_points given).")) data = points end clusters = _dbscan(data, num_points, radius, min_neighbors, min_cluster_size) return DbscanResult(clusters, num_points) end # An implementation of DBSCAN algorithm that keeps track of both the core and boundary points function _dbscan(data::Union{AbstractMatrix, Tuple{NNTree, AbstractMatrix}}, num_points::Integer, radius::Real, min_neighbors::Integer, min_cluster_size::Integer) 1 <= min_neighbors || throw(ArgumentError("min_neighbors $min_neighbors must be β‰₯ 1")) 1 <= min_cluster_size || throw(ArgumentError("min_cluster_size $min_cluster_size must be β‰₯ 1")) clusters = Vector{DbscanCluster}() visited = fill(false, num_points) cluster_mask = Vector{Bool}(undef, num_points) core_mask = similar(cluster_mask) to_explore = Vector{Int}() neighbors = Vector{Int}() @inbounds for i = 1:num_points visited[i] && continue @assert isempty(to_explore) push!(to_explore, i) # start a new cluster fill!(core_mask, false) fill!(cluster_mask, false) # depth-first search to find all points in the cluster while !isempty(to_explore) point = popfirst!(to_explore) visited[point] && continue visited[point] = true _dbscan_region_query!(neighbors, data, point, radius) cluster_mask[neighbors] .= true # mark as candidates # if a point has enough neighbors, it is a 'core' point and its neighbors are added to the to_explore list if length(neighbors) >= min_neighbors core_mask[point] = true for j in neighbors visited[j] || push!(to_explore, j) end end empty!(neighbors) end # if the cluster has core and is large enough, it is accepted if any(core_mask) && (cluster_size = sum(cluster_mask)) >= min_cluster_size core = Vector{Int}() boundary = Vector{Int}() for (i, (is_cluster, is_core)) in enumerate(zip(cluster_mask, core_mask)) @assert is_core && is_cluster || !is_core # core is always in a cluster is_cluster && push!(ifelse(is_core, core, boundary), i) end @assert !isempty(core) push!(clusters, DbscanCluster(cluster_size, core, boundary)) end end return clusters end # distance matrix-based function _dbscan_region_query!(neighbors::AbstractVector, dists::AbstractMatrix, point::Integer, radius::Real) empty!(neighbors) for (i, dist) in enumerate(view(dists, :, point)) (dist <= radius) && push!(neighbors, i) end return neighbors end # NN-tree based function _dbscan_region_query!(neighbors::AbstractVector, nntree_and_points::Tuple{NNTree, AbstractMatrix}, point::Integer, radius::Real) nntree, points = nntree_and_points empty!(neighbors) return append!(neighbors, inrange(nntree, view(points, :, point), radius)) end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
3078
## Deprecated # deprecated at 0.13 @deprecate kmpp(X, k) initseeds(:kmpp, X, k) @deprecate kmpp_by_costs(costs, k) initseeds_by_costs(:kmpp, costs, k) # deprecated at 0.13.1 @deprecate copyseeds(X, iseeds) copyseeds!(Matrix{eltype(X)}(undef, size(X, 1), length(iseeds)), X, iseeds) # deprecated as of 0.13.2 @deprecate varinfo(k1::Int, a1::AbstractVector{Int}, k2::Int, a2::AbstractVector{Int}) varinfo(a1, a2) @deprecate varinfo(R::ClusteringResult, k0::Int, a0::AbstractVector{Int}) varinfo(R, a0) # deprecated as of 0.14.5 @deprecate(dbscan(D::AbstractMatrix{<:Real}, radius::Real, min_neighbors::Integer), dbscan(D, radius; metric=nothing, min_neighbors=min_neighbors)) # FIXME remove after deprecation period for merge/labels/height/method Base.propertynames(hclu::Hclust, private::Bool = false) = (fieldnames(typeof(hclu))..., #= deprecated as of 0.12 =# :height, :labels, :merge, :method) # FIXME remove after deprecation period for merge/labels/height/method @inline function Base.getproperty(hclu::Hclust, prop::Symbol) if prop === :height # deprecated as of 0.12 Base.depwarn("Hclust::height is deprecated, use Hclust::heights", Symbol("Hclust::height")) return getfield(hclu, :heights) elseif prop === :labels # deprecated as of 0.12 Base.depwarn("Hclust::labels is deprecated and will be removed in future versions", Symbol("Hclust::labels")) return 1:nnodes(hclu) elseif prop === :merge # deprecated as of 0.12 Base.depwarn("Hclust::merge is deprecated, use Hclust::merges", Symbol("Hclust::merge")) return getfield(hclu, :merges) elseif prop === :method # deprecated as of 0.12 Base.depwarn("Hclust::method is deprecated, use Hclust::linkage", Symbol("Hclust::method")) return getfield(hclu, :linkage) else return getfield(hclu, prop) end end # FIXME remove after deprecation period for cweights Base.propertynames(clu::KmeansResult, private::Bool = false) = (fieldnames(typeof(clu))..., #= deprecated as of 0.13.2 =# :cweights) # FIXME remove after deprecation period for cweights @inline function Base.getproperty(clu::KmeansResult, prop::Symbol) if prop === :cweights # deprecated as of 0.13.2 Base.depwarn("KmeansResult::cweights is deprecated, use wcounts(clu::KmeansResult)", Symbol("KmeansResult::cweights")) return clu.wcounts else return getfield(clu, prop) end end # FIXME remove after deprecation period for acosts Base.propertynames(kmed::KmedoidsResult, private::Bool = false) = (fieldnames(typeof(kmed))..., #= deprecated since v0.13.4=# :acosts) # FIXME remove after deprecation period for acosts function Base.getproperty(kmed::KmedoidsResult, prop::Symbol) if prop == :acosts # deprecated since v0.13.4 Base.depwarn("KmedoidsResult::acosts is deprecated, use KmedoidsResult::costs", Symbol("KmedoidsResult::costs")) return getfield(kmed, :costs) else return getfield(kmed, prop) end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
5463
# Fuzzy C means algorithm ## Interface """ FuzzyCMeansResult{T<:AbstractFloat} The output of [`fuzzy_cmeans`](@ref) function. # Fields - `centers::Matrix{T}`: the ``dΓ—C`` matrix with columns being the centers of resulting fuzzy clusters - `weights::Matrix{Float64}`: the ``nΓ—C`` matrix of assignment weights (``\\mathrm{weights}_{ij}`` is the weight (probability) of assigning ``i``-th point to the ``j``-th cluster) - `iterations::Int`: the number of executed algorithm iterations - `converged::Bool`: whether the procedure converged """ struct FuzzyCMeansResult{T<:AbstractFloat} centers::Matrix{T} # cluster centers (d x C) weights::Matrix{Float64} # assigned weights (n x C) iterations::Int # number of elapsed iterations converged::Bool # whether the procedure converged end nclusters(R::FuzzyCMeansResult) = size(R.centers, 2) wcounts(R::FuzzyCMeansResult) = dropdims(sum(R.weights, dims=2), dims=2) ## Utility functions function update_weights!(weights, data, centers, fuzziness, dist_metric) pow = 2.0/(fuzziness-1) nrows, ncols = size(weights) dists = pairwise(dist_metric, data, centers, dims=2) for i in 1:nrows for j in 1:ncols den = 0.0 for k in 1:ncols den += (dists[i,j]/dists[i,k])^pow end weights[i,j] = 1.0/den end end end function update_centers!(centers, data, weights, fuzziness) nrows, ncols = size(weights) T = eltype(centers) for j in 1:ncols num = zeros(T, size(data,1)) den = zero(T) for i in 1:nrows Ξ΄m = weights[i,j]^fuzziness num += Ξ΄m * data[:,i] den += Ξ΄m end centers[:,j] = num/den end end const _fcmeans_default_maxiter = 100 const _fcmeans_default_tol = 1.0e-3 const _fcmeans_default_display = :none """ fuzzy_cmeans(data::AbstractMatrix, C::Integer, fuzziness::Real; [dist_metric::SemiMetric], [...]) -> FuzzyCMeansResult Perform Fuzzy C-means clustering over the given `data`. # Arguments - `data::AbstractMatrix`: ``dΓ—n`` data matrix. Each column represents one ``d``-dimensional data point. - `C::Integer`: the number of fuzzy clusters, ``2 ≀ C < n``. - `fuzziness::Real`: clusters fuzziness (``ΞΌ`` in the [mathematical formulation](@ref fuzzy_cmeans_def)), ``ΞΌ > 1``. Optional keyword arguments: - `dist_metric::SemiMetric` (defaults to `Euclidean`): the `SemiMetric` object that defines the distance between the data points - `maxiter`, `tol`, `display`, `rng`: see [common options](@ref common_options) """ function fuzzy_cmeans( data::AbstractMatrix{<:Real}, C::Integer, fuzziness::Real; maxiter::Integer = _fcmeans_default_maxiter, tol::Real = _fcmeans_default_tol, dist_metric::SemiMetric = Euclidean(), display::Symbol = _fcmeans_default_display, rng::AbstractRNG = Random.GLOBAL_RNG ) nrows, ncols = size(data) 2 <= C < ncols || throw(ArgumentError("C must have 2 <= C < n=$ncols ($C given)")) 1 < fuzziness || throw(ArgumentError("fuzziness must be greater than 1 ($fuzziness given)")) _fuzzy_cmeans(data, C, fuzziness, maxiter, tol, dist_metric, display_level(display), rng) end ## Core implementation function _fuzzy_cmeans( data::AbstractMatrix{T}, # data matrix C::Integer, # total number of classes fuzziness::Real, # fuzziness maxiter::Int, # maximum number of iterations tol::Real, # tolerance dist_metric::SemiMetric, # metric to calculate distance displevel::Int, # the level of display rng::AbstractRNG # RNG object ) where T<:Real nrows, ncols = size(data) # Initialize weights randomly weights = rand(rng, Float64, ncols, C) weights ./= sum(weights, dims=2) centers = zeros(T, (nrows, C)) prev_centers = identity.(centers) Ξ΄ = Inf iter = 0 if displevel >= 2 @printf "%7s %18s\n" "Iters" "center-change" println("----------------------------") end while iter < maxiter && (iter <= 1 || Ξ΄ > tol) # skip tol test for iter=1 since prev_centers are not relevant update_centers!(centers, data, weights, fuzziness) update_weights!(weights, data, centers, fuzziness, dist_metric) Ξ΄ = maximum(colwise(dist_metric, prev_centers, centers)) copyto!(prev_centers, centers) iter += 1 if displevel >= 2 @printf("%7d %18.6e\n", iter, Ξ΄) end end if Ξ΄ <= tol if displevel >= 1 @info "Fuzzy C-means converged with $iter iterations (Ξ΄ = $Ξ΄)" end else @warn "Fuzzy C-means terminated without convergence after $iter iterations (Ξ΄ = $Ξ΄)" end FuzzyCMeansResult(centers, weights, iter, Ξ΄ <= tol) end function Base.show(io::IO, result::FuzzyCMeansResult) d, C = size(result.centers) n, iter = size(result.weights, 1), result.iterations print(io, "FuzzyCMeansResult: $C clusters for $n points in $d dimensions ") if result.converged print(io, "(converged in $iter iterations)") else print(io, "(failed to converge in $iter iterations)") end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
32274
## hclust.jl (c) 2014, 2017 David A. van Leeuwen ## Hierarchical clustering, similar to R's hclust() ## Algorithms are based upon C. F. Olson, Parallel Computing 21 (1995) 1313--1325. """ Hclust{T<:Real} The output of [`hclust`](@ref), hierarchical clustering of data points. Provides the bottom-up definition of the dendrogram as the sequence of merges of the two lower subtrees into a higher level subtree. This type mostly follows R's `hclust` class. # Fields - `merges::Matrix{Int}`: ``NΓ—2`` matrix encoding subtree merges: * each row specifies the left and right subtrees (referenced by their ``id``s) that are merged * negative subtree ``id`` denotes the leaf node and corresponds to the data point at position ``-id`` * positive ``id`` denotes nontrivial subtree (the row `merges[id, :]` specifies its left and right subtrees) - `linkage::Symbol`: the name of *cluster linkage* function used to construct the hierarchy (see [`hclust`](@ref)) - `heights::Vector{T}`: subtree heights, i.e. the distances between the left and right branches of each subtree calculated using the specified `linkage` - `order::Vector{Int}`: the data point indices ordered so that there are no intersecting branches on the *dendrogram* plot. This ordering also puts the points of the same cluster close together. See also: [`hclust`](@ref). """ struct Hclust{T<:Real} merges::Matrix{Int} # the tree merge sequence. 1st column: left subtree, 2nd column: right subtree heights::Vector{T} # subtrees heights (aggregated distance between its elements) order::Vector{Int} # the order of datapoint (leaf node) indices in the final tree linkage::Symbol # subtree distance type (cluster linkage) end nmerges(h::Hclust) = length(h.heights) # number of tree merges nnodes(h::Hclust) = length(h.order) # number of datapoints (leaf nodes) height(h::Hclust) = isempty(h.heights) ? typemin(eltype(h.heights)) : last(h.heights) function assertdistancematrix(d::AbstractMatrix) nr, nc = size(d) nr == nc || throw(DimensionMismatch("Distance matrix should be square.")) issymmetric(d) || throw(ArgumentError("Distance matrix should be symmetric.")) end ## R's order of trees _isrordered(i::Integer, j::Integer) = i < 0 && j < 0 && i > j || # leaves (datapoints) are sorted in ascending order i > 0 && j > 0 && i < j || # if i-th tree was created before j-th one, it goes first i < 0 && j > 0 # leaves go before trees # the sequence of tree merges struct HclustMerges{T<:Real} nnodes::Int # number of datapoints (leaf nodes) heights::Vector{T} # tree height mleft::Vector{Int} # ID of the left subtree merged mright::Vector{Int} # ID of the right subtree merged function HclustMerges{T}(nnodes::Integer) where {T<:Real} ntrees = max(nnodes-1, 0) new{T}(nnodes, sizehint!(T[], ntrees), sizehint!(Int[], ntrees), sizehint!(Int[], ntrees)) end end nmerges(hmer::HclustMerges) = length(hmer.heights) nnodes(hmer::HclustMerges) = hmer.nnodes # merges i-th and j-th subtrees into a new tree of height h and returns its index function push_merge!(hmer::HclustMerges{T}, i::Integer, j::Integer, h::T) where T<:Real push!(hmer.mleft, i) push!(hmer.mright, j) push!(hmer.heights, h) return nmerges(hmer) end #= utilities for working with the vector of clusters =# cluster_size(cl::AbstractVector{Vector{Int}}, i::Integer) = return i > 0 ? length(cl[i]) : #= leaf node =# 1 # indices of nodes assigned to i-th cluster # if i-th cluster is a leaf node (i < 0), return leafcluster setting its contents to [-i] function cluster_elems(clusters::AbstractVector{Vector{Int}}, i::Integer, leafcluster::AbstractVector{Int}) if i > 0 return clusters[i] else # i < 0 means it's a leaf node @assert length(leafcluster) == 1 @inbounds leafcluster[1] = -i return leafcluster end end # merges i-th and j-th clusters and adds the result to the end of the `cl` list; # i-th and j-th clusters are deactivated (emptied or replaced by `noels` vector) # if either i or j are negative, the corresponding cluster is a leaf node (-i or -j, resp.) function merge_clusters!(cl::AbstractVector{Vector{Int}}, i::Integer, j::Integer, noels::Vector{Int} = Int[]) if j < 0 # negative == cluster is a leaf node (-j) newclu = i < 0 ? [-i, -j] : push!(cl[i], -j) else clj = cl[j] if i < 0 newclu = pushfirst!(clj, -i) cl[j] = noels else newclu = append!(cl[i], clj) empty!(clj) # not used anymore end end if i > 0 cl[i] = noels # not used anymore end return push!(cl, newclu) end # compute resulting leaves (original datapoints) permutation # given a sequence of tree nodes merges function hclust_perm(hmer::HclustMerges) n = nmerges(hmer) perm = fill(1, nnodes(hmer)) # resulting permutation clusters = Vector{Int}[] # clusters elements onel = [0] # placeholder for the elements of a leaf node noels = Int[] # placeholder for empty decativated trees for i in 1:n ml = hmer.mleft[i] mr = hmer.mright[i] # elements in the right subtree are moved length(ml) positions from the start nl = cluster_size(clusters, ml) @inbounds for i in cluster_elems(clusters, mr, onel) perm[i] += nl end merge_clusters!(clusters, ml, mr, noels) end return perm end # convert HclustMerges to Hclust function Hclust(hmer::HclustMerges, method::Symbol) Hclust(hcat(hmer.mleft, hmer.mright), hmer.heights, invperm(hclust_perm(hmer)), method) end # active trees of hclust algorithm struct HclustTrees{T<:Real} merges::HclustMerges{T} # history of tree merges id::Vector{Int} # IDs of active trees cl::Vector{Vector{Int}} # elements in the non-trivial trees noels::Vector{Int} # empty placeholder for elements of deactivated trees HclustTrees{T}(n::Integer) where T<:Real = new{T}(HclustMerges{T}(n), collect(-(1:n)), # init with all leaves sizehint!(Vector{Int}[], n), Vector{Int}()) end nmerges(htre::HclustTrees) = nmerges(htre.merges) ntrees(htre::HclustTrees) = length(htre.id) nnodes(htre::HclustTrees) = nnodes(htre.merges) tree_size(htre::HclustTrees, i::Integer) = cluster_size(htre.cl, htre.id[i]) # ids of elements assigned to the tree with i-th index # if i-th the is a leaf node, return leafcluster setting its contents to the id of that node tree_elems(htre::HclustTrees, i::Integer, leafcluster::AbstractVector{Int}) = cluster_elems(htre.cl, htre.id[i], leafcluster) # merges the trees referenced by indices i and j in htre.id into a new tree of height h; # the i-th and j-th trees are deactived, their containers are emptied or replaced by `noels` placeholder function merge_trees!(htre::HclustTrees, i::Integer, j::Integer, h::Real) # get tree ids ci = htre.id[i] cj = htre.id[j] cnew = push_merge!(htre.merges, ci, cj, h) # in the tree list, replace ci by cnew and cj by cnew the last tree htre.id[i] = cnew htre.id[j] = htre.id[end] pop!(htre.id) merge_clusters!(htre.cl, ci, cj, htre.noels) return htre end ## This seems to work like R's implementation, but it is extremely inefficient ## This probably scales O(n^3) or worse. We can use it to check correctness function hclust_n3(d::AbstractMatrix, linkage::Function) assertdistancematrix(d) T = eltype(linkage(d, 1:0, 1:0)) htre = HclustTrees{T}(size(d, 1)) onecol = [0] onerow = [0] while ntrees(htre) > 1 # find the closest pair of trees mi and mj, mj < mi NNmindist = typemax(T) NNi = NNj = 0 # indices of nearest neighbors clusters for j in 1:ntrees(htre) cols = tree_elems(htre, j, onecol) for i in (j+1):ntrees(htre) rows = tree_elems(htre, i, onerow) dist = linkage(d, rows, cols) # very expensive if (NNi == 0) || (dist < NNmindist) NNmindist = dist NNi = i NNj = j end end end merge_trees!(htre, NNj, NNi, NNmindist) end return htre.merges end #=== ReducibleMetric{T<:Real} Base type for _reducible_ Lance–Williams cluster metrics. The metric `d` is called _reducible_ if for any clusters `A`, `B` and `C` and some `ρ > 0` s.t. ``` d(A, B) < ρ, d(A, C) > ρ, d(B, C) > ρ ``` it follows that ``` d(AβˆͺB, C) > ρ ``` If the cluster metrics belongs to Lance-Williams family, there is an efficient formula that defines `d(AβˆͺB, C)` using `d(A, C)`, `d(B, C)` and `d(A, B)`. ===# abstract type ReducibleMetric{T <: Real} end # due to reducibility, new_dki=d[k,iβˆͺj] distance should not be less than # min(d[k,i], d[k,j]), enforce this property to workaround floating-point # arithmetic errors in Lance-Williams formula @inline clamp_reducible_metric(new_dki, dki, dkj) = max(new_dki, min(dki, dkj)) #=== MinimalDistance <: ReducibleMetric Distance between the clusters is the minimal distance between any pair of their points. ===# struct MinimalDistance{T} <: ReducibleMetric{T} MinimalDistance(d::AbstractMatrix{T}) where T<:Real = new{T}() end # update `metric` distance between `k`-th cluster and `i`-th cluster # (`d[k, i]`, `k < i`) after `j`-th cluster was merged into `i`-th cluster @inline update!(metric::MinimalDistance{T}, d::AbstractMatrix{T}, k::Integer, i::Integer, d_ij::T, d_kj::T, ni::Integer, nj::Integer, nk::Integer ) where T = (d[k, i] > d_kj) && (d[k, i] = d_kj) #=== WardDistance <: ReducibleMetric Ward distance between the two clusters `A` and `B` is the amount by which merging the two clusters into a single larger cluster `AβˆͺB` would increase the average squared distance of a point to its cluster centroid. ===# struct WardDistance{T} <: ReducibleMetric{T} WardDistance(d::AbstractMatrix{T}) where T<:Real = new{typeof(one(T)/2)}() end # update `metric` distance between `k`-th cluster and `i`-th cluster # (`d[k, i]`, `k < i`) after `j`-th cluster was merged into `i`-th cluster @inline function update!(metric::WardDistance{T}, d::AbstractMatrix{T}, k::Integer, i::Integer, d_ij::T, d_kj::T, ni::Integer, nj::Integer, nk::Integer ) where T d_ki = d[k, i] d[k, i] = clamp_reducible_metric(((ni+nk)*d_ki + (nj+nk)*d_kj - nk*d_ij) / (ni+nj+nk), d_ki, d_kj) end #=== AverageDistance <: ReducibleMetric Average distance between a pair of points from each clusters. ===# struct AverageDistance{T} <: ReducibleMetric{T} AverageDistance(d::AbstractMatrix{T}) where T<:Real = new{typeof(one(T)/2)}() end # update `metric` distance between `k`-th cluster and `i`-th cluster # (`d[k, i]`, `k < i`) after `j`-th cluster was merged into `i`-th cluster @inline function update!(metric::AverageDistance{T}, d::AbstractMatrix{T}, k::Integer, i::Integer, d_ij::T, d_kj::T, ni::Integer, nj::Integer, nk::Integer ) where T nij = ni + nj d_ki = d[k, i] d[k, i] = clamp_reducible_metric((ni * d_ki + nj * d_kj) / nij, d_ki, d_kj) end #=== MaximumDistance <: ReducibleMetric Maximum distance between a pair of points from each clusters. ===# struct MaximumDistance{T} <: ReducibleMetric{T} MaximumDistance(d::AbstractMatrix{T}) where T<:Real = new{T}() end # update `metric` distance between `k`-th cluster and `i`-th cluster # (`d[k, i]`, `k < i`) after `j`-th cluster was merged into `i`-th cluster @inline update!(metric::MaximumDistance{T}, d::AbstractMatrix{T}, k::Integer, i::Integer, d_ij::T, d_kj::T, ni::Integer, nj::Integer, nk::Integer ) where T = (d[k, i] < d_kj) && (d[k, i] = d_kj) # Update upper-triangular matrix `d` of cluster-cluster `metric`-based distances # when merging cluster `j` into cluster `i` and # moving the last cluster (`N`) into the `j`-th slot function update_distances_upon_merge!( d::AbstractMatrix{T}, metric::ReducibleMetric{T}, clu_size::Function, i::Integer, j::Integer, N::Integer ) where {T <: Real} @assert 1 <= i < j <= N <= size(d, 1) "1 ≀ i=$i < j=$j <= N=$N ≀ $(size(d, 1))" @inbounds d_ij = d[i, j] @inbounds ni = clu_size(i) @inbounds nj = clu_size(j) ## update d, split in ranges k<i, i<k<j, j<k≀newj for k in 1:i # k ≀ i @inbounds update!(metric, d, k, i, d_ij, d[k,j], ni, nj, clu_size(k)) end for k in (i+1):(j-1) # i < k < j @inbounds update!(metric, d, i, k, d_ij, d[k,j], ni, nj, clu_size(k)) end for k in (j+1):N # j < k ≀ N @inbounds update!(metric, d, i, k, d_ij, d[j,k], ni, nj, clu_size(k)) end ## move N-th row/col into j if j < N @inbounds d[j, j] = d[N, N] for k in 1:(j-1) # k < j < N @inbounds d[k,j] = d[k,N] end for k in (j+1):(N-1) # j < k < N @inbounds d[j,k] = d[k,N] end end return d end # nearest neighbor to i-th node given symmetric distance matrix d; # returns 0 if no nearest neighbor (1Γ—1 matrix) function nearest_neighbor(d::AbstractMatrix, i::Integer, N::Integer=size(d, 1)) (N <= 1) && return 0, NaN # initialize with the first non-i node @inbounds if i > 1 NNi = 1 NNdist = d[NNi, i] else NNi = 2 NNdist = d[i, NNi] end @inbounds for j in (NNi+1):(i-1) if NNdist > d[j, i] NNi = j NNdist = d[j, i] end end @inbounds for j in (i+1):N if NNdist > d[i, j] NNi = j NNdist = d[i, j] end end return NNi, NNdist end ## Efficient single link algorithm, according to Olson, O(n^2), fig 2. ## Verified against R's implementation, correct, and about 2.5 x faster ## For each i < j compute D(i,j) (this is already given) ## For each 0 < i ≀ n compute Nearest Neighbor NN(i) ## Repeat n-1 times ## find i,j that minimize D(i,j) ## merge clusters i and j ## update D(i,j) and NN(i) accordingly function hclust_minimum(ds::AbstractMatrix{T}) where T<:Real d = Matrix(ds) # active trees distances, only upper (i < j) is used mindist = MinimalDistance(d) hmer = HclustMerges{T}(size(d, 1)) n = nnodes(hmer) ## For each 0 < i ≀ n compute Nearest Neighbor NN[i] NN = [nearest_neighbor(d, i, n)[1] for i in 1:n] ## the main loop trees = collect(-(1:n)) # indices of active trees, initialized to all leaves while length(trees) > 1 # O(n) # find a pair of nearest trees, i and j i = 1 NNmindist = i < NN[i] ? d[i, NN[i]] : d[NN[i], i] for k in 2:length(trees) # O(n) @inbounds dist = k < NN[k] ? d[k,NN[k]] : d[NN[k],k] if dist < NNmindist NNmindist = dist i = k end end j = NN[i] if i > j i, j = j, i # make sure i < j end last_tree = length(trees) update_distances_upon_merge!(d, mindist, i -> 0, i, j, last_tree) trees[i] = push_merge!(hmer, trees[i], trees[j], NNmindist) # reassign the last tree to position j trees[j] = trees[last_tree] NN[j] = NN[last_tree] pop!(NN) pop!(trees) ## update NN[k] for k in eachindex(NN) if NN[k] == j # j is merged into i (only valid for the min!) NN[k] = i elseif NN[k] == last_tree # last_tree is moved into j NN[k] = j end end ## finally we need to update NN[i], because it was nearest to j NNmindist = typemax(T) NNi = 0 for k in 1:(i-1) @inbounds if (NNi == 0) || (d[k,i] < NNmindist) NNmindist = d[k,i] NNi = k end end for k in (i+1):length(trees) @inbounds if (NNi == 0) || (d[i,k] < NNmindist) NNmindist = d[i,k] NNi = k end end NN[i] = NNi end return hmer end ## functions to compute maximum, minimum, mean for just a slice of an array ## FIXME: method(view(d, cl1, cl2)) would be much more generic, but it leads to extra allocations function slicemaximum(d::AbstractMatrix, cl1::AbstractVector{Int}, cl2::AbstractVector{Int}) maxdist = typemin(eltype(d)) @inbounds for j in cl2, i in cl1 if d[i,j] > maxdist maxdist = d[i,j] end end maxdist end function sliceminimum(d::AbstractMatrix, cl1::AbstractVector{Int}, cl2::AbstractVector{Int}) mindist = typemax(eltype(d)) @inbounds for j in cl2, i in cl1 if d[i,j] < mindist mindist = d[i,j] end end mindist end function slicemean(d::AbstractMatrix, cl1::AbstractVector{Int}, cl2::AbstractVector{Int}) s = zero(eltype(d)) @inbounds for j in cl2 sj = zero(eltype(d)) for i in cl1 sj += d[i,j] end s += sj end s / (length(cl1)*length(cl2)) end ## reorders the tree merges by the height of resulting trees ## (to be compatible with R's hclust()) function orderbranches_r!(hmer::HclustMerges) o = sortperm(hmer.heights) io = invperm(o) ml = hmer.mleft mr = hmer.mright for i in eachindex(ml) if ml[i] > 0 ml[i] = io[ml[i]] end if mr[i] > 0 mr[i] = io[mr[i]] end if !_isrordered(ml[i], mr[i]) ml[i], mr[i] = mr[i], ml[i] end end permute!(ml, o) permute!(mr, o) permute!(hmer.heights, o) return hmer end ## Given a hierarchical cluster and the distance matrix used to generate it, ## use fast algorithm to determine optimal leaf order minimizing the distance ## between adjacent leaves. This is done using a heuristic where, ## when combining multi-leaf sub branches, only the outermost leaves are ## compared (a maximum of 4 comparisons per intersection). ## ## Sub branches are flipped if necessary to minimize the distance between adjacent ## nodes, and then the combined branches are treated as a block for future ## comparisons. ## ## Based on: ## Bar-Joseph et. al. "Fast optimal leaf ordering for hierarchical clustering." _Bioinformatics_. (2001) function orderbranches_barjoseph!(hmer::HclustMerges, dm::AbstractMatrix) order = invperm(hclust_perm(hmer)) node_ranges = Tuple{Int,Int}[] # ranges of order array indices occupied by the leaves of each node for v in 1:nnodes(hmer)-1 vl, vr = hmer.mleft[v], hmer.mright[v] (uidx, midx) = node_range(vl, order, node_ranges) (kidx, widx) = node_range(vr, order, node_ranges) (u, m) = (order[uidx], order[midx]) (k, w) = (order[kidx], order[widx]) if vl < 0 && vr < 0 # Nothing needs to be done elseif vl < 0 # check if flipping would reduce distance if dm[m,k] > dm[m,w] reverse!(order, uidx, midx) rotate_merges!(hmer, vr) end elseif vr < 0 if dm[k,m] > dm[k,u] reverse!(order, kidx, widx) rotate_merges!(hmer, vl) end elseif vl > 0 && vr > 0 # For 2 multi-leaf branches, determine if one or two flips is required # 1 = do not flip # 2 = flip left # 3 = flip right # 4 = flip both flp = argmin((dm[m,k], dm[u,k], dm[m,w], dm[u,w])) if flp == 2 || flp == 4 reverse!(order, uidx, midx) rotate_merges!(hmer, vl) end if flp == 3 || flp == 4 reverse!(order, kidx, widx) rotate_merges!(hmer, vr) end else # vl == vr == 0 error("Nodes of HclustMerges should never have a value of 0") end push!(node_ranges, (uidx, widx)) end return hmer end ## Get the left and right bounds of the range of the `order` array indices occupied by the elements of the node. ## If `node` is a leaf, left and right bounds will be the same. function node_range(node::Int, order::Vector{Int}, node_ranges::Vector{Tuple{Int,Int}}) if node < 0 # leaf node left = right = findfirst(isequal(-node), order) elseif node > 0 # branch node left, right = node_ranges[node] else error("node position cannot be zero") end return left, right end ## recursively rotate merges function rotate_merges!(hmer::HclustMerges, i::Integer) (hmer.mleft[i], hmer.mright[i]) = (hmer.mright[i], hmer.mleft[i]) # if a node is positive, it represents a merge, # and all merges below should be rotated as well if hmer.mleft[i] > 0 rotate_merges!(hmer, hmer.mleft[i]) end if hmer.mright[i] > 0 rotate_merges!(hmer, hmer.mright[i]) end end ## Another nearest neighbor algorithm, for reducible metrics ## From C. F. Olson, Parallel Computing 21 (1995) 1313--1325, fig 5 ## Verfied against R implementation for mean and maximum, correct but ~ 5x slower ## Pick c1: 0 ≀ c1 ≀ n random ## i <- 1 ## repeat n-1 times ## repeat ## i++ ## c[i] = nearest neighbor c[i-1] ## until c[i] = c[i-2] ## nearest of nearest is cluster itself ## merge c[i] and nearest neighbor c[i] ## if i>3 i -= 3 else i <- 1 function hclust_nn(d::AbstractMatrix, linkage::Function) T = eltype(linkage(d, 1:0, 1:0)) htre = HclustTrees{T}(size(d, 1)) onerow = [0] # placeholder for a leaf node of cl_i onecol = [0] # placeholder for a leaf node of cl_j NN = [1] # nearest neighbors chain of tree indices, init by random tree index while ntrees(htre) > 1 # search for a pair of closest clusters, # they would be mutual nearest neighbors on top of the NN stack NNmindist = typemax(T) while true NNtop = NN[end] els_top = tree_elems(htre, NNtop, onecol) ## find NNnext: the nearest neighbor of NNtop and the next stack top NNnext = NNtop > 1 ? 1 : 2 NNmindist = linkage(d, els_top, tree_elems(htre, NNnext, onerow)) for k in (NNnext+1):ntrees(htre) if k != NNtop dist = linkage(d, tree_elems(htre, k, onerow), els_top) if dist < NNmindist NNmindist = dist NNnext = k end end end if length(NN) > 1 && NNnext == NN[end-1] # NNnext==NN[end-1] and NNtop=NN[end] are mutual n.neighbors break else push!(NN, NNnext) # grow the chain end end ## merge NN[end] and its nearest neighbor, i.e., NN[end-1] NNlo = pop!(NN) NNhi = pop!(NN) if NNlo > NNhi NNlo, NNhi = NNhi, NNlo end last_tree = ntrees(htre) merge_trees!(htre, NNlo, NNhi, NNmindist) ## replace any nearest neighbor referring to the last_tree with NNhi if NNhi < last_tree for k in eachindex(NN) if NN[k] == last_tree NN[k] = NNhi end end end isempty(NN) && push!(NN, 1) # restart NN chain end return htre.merges end ## Nearest neighbor chain algorithm for reducible Lance-Williams metrics. ## In comparison to hclust_nn() maintains the upper-triangular matrix ## of cluster-cluster distances, so it requires O(NΒ²) memory, but it's faster, ## because distance calculation is more efficient. function hclust_nn_lw(d::AbstractMatrix, metric::ReducibleMetric{T}) where {T<:Real} dd = copyto!(Matrix{T}(undef, size(d)...), d) htre = HclustTrees{T}(size(d, 1)) NN = [1] # nearest neighbors chain of tree indices, init by random tree index while ntrees(htre) > 1 # search for a pair of closest clusters, # they would be mutual nearest neighbors on top of the NN stack NNmindist = typemax(T) while true ## find NNnext: nearest neighbor of NN[end] (and the next stack top) NNnext, NNmindist = nearest_neighbor(dd, NN[end], ntrees(htre)) @assert NNnext > 0 if length(NN) > 1 && NNnext == NN[end-1] # NNnext==NN[end-1] and NN[end] are mutual n.neighbors break else push!(NN, NNnext) end end ## merge NN[end] and its nearest neighbor, i.e., NN[end-1] NNlo = pop!(NN) NNhi = pop!(NN) if NNlo > NNhi NNlo, NNhi = NNhi, NNlo end last_tree = ntrees(htre) ## update the distance matrix (while the trees are not merged yet) update_distances_upon_merge!(dd, metric, i -> tree_size(htre, i), NNlo, NNhi, last_tree) merge_trees!(htre, NNlo, NNhi, NNmindist) # side effect: puts last_tree to NNhi for k in eachindex(NN) NNk = NN[k] if (NNk == NNlo) || (NNk == NNhi) # in case of duplicate distances, NNlo or NNhi may appear in NN # several times, if that's detected, restart NN search empty!(NN) break elseif NNk == last_tree ## the last_tree was moved to NNhi slot by merge_trees!() # update the NN references to it NN[k] = NNhi end end isempty(NN) && push!(NN, 1) # restart NN chain end return htre.merges end """ hclust(d::AbstractMatrix; [linkage], [uplo], [branchorder]) -> Hclust Perform hierarchical clustering using the distance matrix `d` and the cluster `linkage` function. Returns the dendrogram as a [`Hclust`](@ref) object. # Arguments - `d::AbstractMatrix`: the pairwise distance matrix. ``d_{ij}`` is the distance between ``i``-th and ``j``-th points. - `linkage::Symbol`: *cluster linkage* function to use. `linkage` defines how the distances between the data points are aggregated into the distances between the clusters. Naturally, it affects what clusters are merged on each iteration. The valid choices are: * `:single` (the default): use the minimum distance between any of the cluster members * `:average`: use the mean distance between any of the cluster members * `:complete`: use the maximum distance between any of the members * `:ward`: the distance is the increase of the average squared distance of a point to its cluster centroid after merging the two clusters * `:ward_presquared`: same as `:ward`, but assumes that the distances in `d` are already squared. - `uplo::Symbol` (optional): specifies whether the upper (`:U`) or the lower (`:L`) triangle of `d` should be used to get the distances. If not specified, the method expects `d` to be symmetric. - `branchorder::Symbol` (optional): algorithm to order leaves and branches. The valid choices are: * `:r` (the default): ordering based on the node heights and the original elements order (compatible with R's `hclust`) * `:barjoseph` (or `:optimal`): branches are ordered to reduce the distance between neighboring leaves from separate branches using the "fast optimal leaf ordering" algorithm from [Bar-Joseph et. al. _Bioinformatics_ (2001)](https://doi.org/10.1093/bioinformatics/17.suppl_1.S22) """ function hclust(d::AbstractMatrix; linkage::Symbol = :single, uplo::Union{Symbol, Nothing} = nothing, branchorder::Symbol=:r) if uplo !== nothing sd = Symmetric(d, uplo) # use upper/lower part of d else assertdistancematrix(d) sd = d end if linkage == :single hmer = hclust_minimum(sd) elseif linkage == :complete hmer = hclust_nn_lw(sd, MaximumDistance(sd)) elseif linkage == :average hmer = hclust_nn_lw(sd, AverageDistance(sd)) elseif linkage == :ward_presquared hmer = hclust_nn_lw(sd, WardDistance(sd)) elseif linkage == :ward if sd === d sd = abs2.(sd) else sd .= abs2.(sd) end hmer = hclust_nn_lw(sd, WardDistance(sd)) hmer.heights .= sqrt.(hmer.heights) else throw(ArgumentError("Unsupported cluster linkage $linkage")) end if branchorder == :barjoseph || branchorder == :optimal orderbranches_barjoseph!(hmer, sd) elseif branchorder == :r orderbranches_r!(hmer) else throw(ArgumentError("Unsupported branchorder=$branchorder method")) end Hclust(hmer, linkage) end @deprecate hclust(d, method::Symbol, uplo::Union{Symbol, Nothing} = nothing) hclust(d, linkage=method, uplo=uplo) """ cutree(hclu::Hclust; [k], [h]) -> Vector{Int} Cut the `hclu` dendrogram to produce clusters at the specified level of granularity. Returns the cluster assignments vector ``z`` (``z_i`` is the index of the cluster for the ``i``-th data point). # Arguments - `k::Integer` (optional) the number of desired clusters. - `h::Real` (optional) the height at which the tree is cut. If both `k` and `h` are specified, it's guaranteed that the number of clusters is not less than `k` and their height is not above `h`. See also: [`hclust`](@ref) """ function cutree(hclu::Hclust; k::Union{Integer, Nothing} = nothing, h::Union{Real, Nothing} = nothing) # check k and h (k !== nothing || h !== nothing) || throw(ArgumentError("Either `k` or `h` must be specified")) n = nnodes(hclu) m = nmerges(hclu) # use k and h to calculate how many merges to do before cutting if k !== nothing k >= min(1, n) || throw(ArgumentError("`k` should be greater or equal $(min(1,n))")) cutm = max(n - k, 0) else cutm = m end horder = sortperm(hclu.heights) # indices of nodes by height if h !== nothing # adjust cutm w.r.t h hix = findlast(i -> hclu.heights[i] ≀ h, horder) if hix !== nothing && hix < cutm cutm = hix elseif nmerges(hclu) >= 1 && hclu.heights[horder[1]] > h # corner case, the requested h smaller that the smallest nontrivial subtree cutm = 0 end end clusters = Vector{Int}[] # contents of the tree nodes, nodes are indexed by height unmerged = fill(true, n) # if a node is not merged to a cluster noels = Int[] # placeholder for empty deactivated trees hindex = invperm(horder) # index of each tree node by height, i.e. pos in `clusters` resize!(horder, cutm) # we only process the first cutm merges for i in horder # visit nodes by height m1 = hclu.merges[i, 1] m2 = hclu.merges[i, 2] if m1 < 0 unmerged[-m1] = false c1 = m1 else c1 = hindex[m1] end if m2 < 0 unmerged[-m2] = false c2 = m2 else c2 = hindex[m2] end merge_clusters!(clusters, c1, c2, noels) end ## build an array of cluster indices (R's order) res = fill(0, n) # sort non-empty clusters by the minimal element index filter!(!isempty, clusters) permute!(clusters, sortperm(minimum.(clusters))) i = findfirst(unmerged) next = 1 for clu in clusters cl1 = minimum(clu) while (i !== nothing) && (i < cl1) res[i] = next next += 1 i = findnext(unmerged, i+1) end res[clu] .= next next += 1 end while i !== nothing res[i] = next next += 1 i = findnext(unmerged, i+1) end return res end ## some diagnostic functions, not exported function printupper(d::Matrix) n = size(d,1) for i in 1:(n-1) print(" " ^ ((i-1) * 6)) for j in (i+1):n print(@sprintf("%5.2f ", d[i,j])) end println() end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
15385
# K-means algorithm #### Interface # C is the type of centers, an (abstract) matrix of size (d x k) # D is the type of pairwise distance computation from points to cluster centers # WC is the type of cluster weights, either Int (in the case where points are # unweighted) or eltype(weights) (in the case where points are weighted). """ KmeansResult{C,D<:Real,WC<:Real} <: ClusteringResult The output of [`kmeans`](@ref) and [`kmeans!`](@ref). # Type parameters * `C<:AbstractMatrix{<:AbstractFloat}`: type of the `centers` matrix * `D<:Real`: type of the assignment cost * `WC<:Real`: type of the cluster weight """ struct KmeansResult{C<:AbstractMatrix{<:AbstractFloat},D<:Real,WC<:Real} <: ClusteringResult centers::C # cluster centers (d x k) assignments::Vector{Int} # assignments (n) costs::Vector{D} # cost of the assignments (n) counts::Vector{Int} # number of points assigned to each cluster (k) wcounts::Vector{WC} # cluster weights (k) totalcost::D # total cost (i.e. objective) iterations::Int # number of elapsed iterations converged::Bool # whether the procedure converged end wcounts(clu::KmeansResult) = clu.wcounts const _kmeans_default_init = :kmpp const _kmeans_default_maxiter = 100 const _kmeans_default_tol = 1.0e-6 const _kmeans_default_display = :none """ kmeans!(X, centers; [kwargs...]) -> KmeansResult Update the current cluster `centers` (``dΓ—k`` matrix, where ``d`` is the dimension and ``k`` the number of centroids) using the ``dΓ—n`` data matrix `X` (each column of `X` is a ``d``-dimensional data point). See [`kmeans`](@ref) for the description of optional `kwargs`. """ function kmeans!(X::AbstractMatrix{<:Real}, # in: data matrix (d x n) centers::AbstractMatrix{<:AbstractFloat}; # in: current centers (d x k) weights::Union{Nothing, AbstractVector{<:Real}}=nothing, # in: data point weights (n) maxiter::Integer=_kmeans_default_maxiter, # in: maximum number of iterations tol::Real=_kmeans_default_tol, # in: tolerance of change at convergence display::Symbol=_kmeans_default_display, # in: level of display distance::SemiMetric=SqEuclidean(), # in: function to compute distances rng::AbstractRNG=Random.GLOBAL_RNG) # in: RNG object d, n = size(X) dc, k = size(centers) WC = (weights === nothing) ? Int : eltype(weights) D = typeof(one(eltype(centers)) * one(WC)) d == dc || throw(DimensionMismatch("Inconsistent array dimensions for `X` and `centers`.")) (1 <= k <= n) || throw(ArgumentError("k must be from 1:n (n=$n), k=$k given.")) if weights !== nothing length(weights) == n || throw(DimensionMismatch("Incorrect length of weights.")) end if k == n # each point in its own cluster return KmeansResult(copyto!(centers, X), collect(1:k), zeros(D, k), fill(1, k), weights !== nothing ? copy(weights) : fill(1, k), D(0), 0, true) else if k == 1 # all points belong to the single cluster mean!(centers, X) end return _kmeans!(X, weights, centers, Int(maxiter), Float64(tol), display_level(display), distance, rng) end end """ kmeans(X, k, [...]) -> KmeansResult K-means clustering of the ``dΓ—n`` data matrix `X` (each column of `X` is a ``d``-dimensional data point) into `k` clusters. # Arguments - `init` (defaults to `:kmpp`): how cluster seeds should be initialized, could be one of the following: * a `Symbol`, the name of a seeding algorithm (see [Seeding](@ref) for a list of supported methods); * an instance of [`SeedingAlgorithm`](@ref); * an integer vector of length ``k`` that provides the indices of points to use as initial seeds. - `weights`: ``n``-element vector of point weights (the cluster centers are the weighted means of cluster members) - `maxiter`, `tol`, `display`: see [common options](@ref common_options) """ function kmeans(X::AbstractMatrix{<:Real}, # in: data matrix (d x n) columns = obs k::Integer; # in: number of centers weights::Union{Nothing, AbstractVector{<:Real}}=nothing, # in: data point weights (n) init::Union{Symbol, SeedingAlgorithm, AbstractVector{<:Integer}}= _kmeans_default_init, # in: initialization algorithm maxiter::Integer=_kmeans_default_maxiter, # in: maximum number of iterations tol::Real=_kmeans_default_tol, # in: tolerance of change at convergence display::Symbol=_kmeans_default_display, # in: level of display distance::SemiMetric=SqEuclidean(), # in: function to calculate distance with rng::AbstractRNG=Random.GLOBAL_RNG) # in: RNG object d, n = size(X) (1 <= k <= n) || throw(ArgumentError("k must be from 1:n (n=$n), k=$k given.")) # initialize the centers using a type wide enough so that the updates # centers[i, cj] += X[i, j] * wj will occur without loss of precision through rounding T = float(weights === nothing ? eltype(X) : promote_type(eltype(X), eltype(weights))) iseeds = initseeds(init, X, k, rng=rng) centers = copyseeds!(Matrix{T}(undef, d, k), X, iseeds) kmeans!(X, centers; weights=weights, maxiter=Int(maxiter), tol=Float64(tol), display=display, distance=distance, rng=rng) end #### Core implementation # core k-means skeleton function _kmeans!(X::AbstractMatrix{<:Real}, # in: data matrix (d x n) weights::Union{Nothing, Vector{<:Real}}, # in: data point weights (n) centers::AbstractMatrix{<:AbstractFloat}, # in/out: matrix of centers (d x k) maxiter::Int, # in: maximum number of iterations tol::Float64, # in: tolerance of change at convergence displevel::Int, # in: the level of display distance::SemiMetric, # in: function to calculate distance rng::AbstractRNG) # in: RNG object d, n = size(X) k = size(centers, 2) to_update = Vector{Bool}(undef, k) # whether a center needs to be updated unused = Vector{Int}() num_affected = k # number of centers to which dists need to be recomputed # assign containers for the vector of assignments & number of data points assigned to each cluster assignments = Vector{Int}(undef, n) counts = Vector{Int}(undef, k) # compute pairwise distances, preassign costs and cluster weights dmat = pairwise(distance, centers, X, dims=2) WC = (weights === nothing) ? Int : eltype(weights) wcounts = Vector{WC}(undef, k) D = typeof(one(eltype(dmat)) * one(WC)) costs = Vector{D}(undef, n) update_assignments!(dmat, true, assignments, costs, counts, to_update, unused) objv = weights === nothing ? sum(costs) : dot(weights, costs) # main loop t = 0 converged = false if displevel >= 2 @printf "%7s %18s %18s | %8s \n" "Iters" "objv" "objv-change" "affected" println("-------------------------------------------------------------") @printf("%7d %18.6e\n", t, objv) end while !converged && t < maxiter t += 1 # update (affected) centers update_centers!(X, weights, assignments, to_update, centers, wcounts) if !isempty(unused) repick_unused_centers(X, costs, centers, unused, distance, rng) to_update[unused] .= true end if t == 1 || num_affected > 0.75 * k pairwise!(distance, dmat, centers, X, dims=2) else # if only a small subset is affected, only compute for that subset affected_inds = findall(to_update) pairwise!(distance, view(dmat, affected_inds, :), view(centers, :, affected_inds), X, dims=2) end # update assignments update_assignments!(dmat, false, assignments, costs, counts, to_update, unused) num_affected = sum(to_update) + length(unused) # compute change of objective and determine convergence prev_objv = objv objv = weights === nothing ? sum(costs) : dot(weights, costs) objv_change = objv - prev_objv if objv_change > tol @warn("The clustering cost increased at iteration #$t") elseif (k == 1) || (abs(objv_change) < tol) converged = true end # display information (if required) if displevel >= 2 @printf("%7d %18.6e %18.6e | %8d\n", t, objv, objv_change, num_affected) end end if displevel >= 1 if converged println("K-means converged with $t iterations (objv = $objv)") else println("K-means terminated without convergence after $t iterations (objv = $objv)") end end return KmeansResult(centers, assignments, costs, counts, wcounts, objv, t, converged) end # # Updates assignments, costs, and counts based on # an updated (squared) distance matrix # function update_assignments!(dmat::Matrix{<:Real}, # in: distance matrix (k x n) is_init::Bool, # in: whether it is the initial run assignments::Vector{Int}, # out: assignment vector (n) costs::Vector{<:Real}, # out: costs of the resultant assignment (n) counts::Vector{Int}, # out: # of points assigned to each cluster (k) to_update::Vector{Bool}, # out: whether a center needs update (k) unused::Vector{Int} # out: list of centers with no points assigned ) k, n = size(dmat) # re-initialize the counting vector fill!(counts, 0) if is_init fill!(to_update, true) else fill!(to_update, false) if !isempty(unused) empty!(unused) end end # process each point @inbounds for j = 1:n # find the closest cluster to the i-th point. Note that a # is necessarily between 1 and size(dmat, 1) === k as a result # and can thus be used as an index in an `inbounds` environment c, a = findmin(view(dmat, :, j)) # set/update the assignment if is_init assignments[j] = a else # update pa = assignments[j] if pa != a # if assignment changes, # both old and new centers need to be updated assignments[j] = a to_update[a] = true to_update[pa] = true end end # set costs and counts accordingly costs[j] = c counts[a] += 1 end # look for centers that have no assigned points for i = 1:k if counts[i] == 0 push!(unused, i) to_update[i] = false # this is handled using different mechanism end end end # # Update centers based on updated assignments # # (specific to the case where points are not weighted) # function update_centers!(X::AbstractMatrix{<:Real}, # in: data matrix (d x n) weights::Nothing, # in: point weights assignments::Vector{Int}, # in: assignments (n) to_update::Vector{Bool}, # in: whether a center needs update (k) centers::AbstractMatrix{<:AbstractFloat}, # out: updated centers (d x k) wcounts::Vector{Int}) # out: updated cluster weights (k) d, n = size(X) k = size(centers, 2) # initialize center weights wcounts[to_update] .= 0 # accumulate columns @inbounds for j in 1:n # skip points assigned to a center that doesn't need to be updated cj = assignments[j] if to_update[cj] if wcounts[cj] > 0 for i in 1:d centers[i, cj] += X[i, j] end else for i in 1:d centers[i, cj] = X[i, j] end end wcounts[cj] += 1 end end # sum ==> mean @inbounds for j in 1:k if to_update[j] cj = wcounts[j] for i in 1:d centers[i, j] /= cj end end end end # # Update centers based on updated assignments # # (specific to the case where points are weighted) # function update_centers!(X::AbstractMatrix{<:Real}, # in: data matrix (d x n) weights::Vector{W}, # in: point weights (n) assignments::Vector{Int}, # in: assignments (n) to_update::Vector{Bool}, # in: whether a center needs update (k) centers::AbstractMatrix{<:Real}, # out: updated centers (d x k) wcounts::Vector{W} # out: updated cluster weights (k) ) where W<:Real d, n = size(X) k = size(centers, 2) # initialize center weights wcounts[to_update] .= 0 # accumulate columns @inbounds for j in 1:n # skip points with negative weights or assigned to a center # that doesn't need to be updated wj = weights[j] cj = assignments[j] if wj > 0 && to_update[cj] if wcounts[cj] > 0 for i in 1:d centers[i, cj] += X[i, j] * wj end else for i in 1:d centers[i, cj] = X[i, j] * wj end end wcounts[cj] += wj end end # sum ==> mean @inbounds for j in 1:k if to_update[j] cj = wcounts[j] for i in 1:d centers[i, j] /= cj end end end end # # Re-picks centers that have no points assigned to them. # function repick_unused_centers(X::AbstractMatrix{<:Real}, # in: the data matrix (d x n) costs::Vector{<:Real}, # in: the current assignment costs (n) centers::AbstractMatrix{<:AbstractFloat}, # out: the centers (d x k) unused::Vector{Int}, # in: indices of centers to be updated distance::SemiMetric, # in: function to calculate the distance with rng::AbstractRNG) # in: RNG object # pick new centers using a scheme like kmeans++ ds = similar(costs) tcosts = copy(costs) n = size(X, 2) for i in unused j = wsample(rng, 1:n, tcosts) tcosts[j] = 0 v = view(X, :, j) centers[:, i] = v colwise!(distance, ds, v, X) tcosts = min(tcosts, ds) end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
7892
# K-medoids algorithm #### Result type """ KmedoidsResult{T} <: ClusteringResult The output of [`kmedoids`](@ref) function. # Fields - `medoids::Vector{Int}`: the indices of ``k`` medoids - `assignments::Vector{Int}`: the indices of clusters the points are assigned to, so that `medoids[assignments[i]]` is the index of the medoid for the ``i``-th point - `costs::Vector{T}`: assignment costs, i.e. `costs[i]` is the cost of assigning ``i``-th point to its medoid - `counts::Vector{Int}`: cluster sizes - `totalcost::Float64`: total assignment cost (the sum of `costs`) - `iterations::Int`: the number of executed algorithm iterations - `converged::Bool`: whether the procedure converged """ mutable struct KmedoidsResult{T} <: ClusteringResult medoids::Vector{Int} # indices of methods (k) assignments::Vector{Int} # assignments (n) costs::Vector{T} # costs of the resultant assignments (n) counts::Vector{Int} # number of points assigned to each cluster (k) totalcost::Float64 # total assignment cost (i.e. objective) (k) iterations::Int # number of elapsed iterations converged::Bool # whether the procedure converged end #### interface functions const _kmed_default_init = :kmpp const _kmed_default_maxiter = 200 const _kmed_default_tol = 1.0e-8 const _kmed_default_display = :none """ kmedoids(dist::AbstractMatrix, k::Integer; ...) -> KmedoidsResult Perform K-medoids clustering of ``n`` points into `k` clusters, given the `dist` matrix (``nΓ—n``, `dist[i, j]` is the distance between the `j`-th and `i`-th points). # Arguments - `init` (defaults to `:kmpp`): how medoids should be initialized, could be one of the following: * a `Symbol` indicating the name of a seeding algorithm (see [Seeding](@ref Seeding) for a list of supported methods). * an integer vector of length `k` that provides the indices of points to use as initial medoids. - `maxiter`, `tol`, `display`: see [common options](@ref common_options) # Note The function implements a *K-means style* algorithm instead of *PAM* (Partitioning Around Medoids). K-means style algorithm converges in fewer iterations, but was shown to produce worse (10-20% higher total costs) results (see e.g. [Schubert & Rousseeuw (2019)](@ref kmedoid_refs)). """ function kmedoids(dist::AbstractMatrix{T}, k::Integer; init=_kmed_default_init, maxiter::Integer=_kmed_default_maxiter, tol::Real=_kmed_default_tol, display::Symbol=_kmed_default_display) where T<:Real # check arguments n = size(dist, 1) size(dist, 2) == n || throw(ArgumentError("dist must be a square matrix ($(size(dist)) given).")) k <= n || throw(ArgumentError("Requested number of medoids exceeds n=$n ($k given).")) # initialize medoids medoids = initseeds_by_costs(init, dist, k)::Vector{Int} @assert length(medoids) == k # invoke core algorithm _kmedoids!(medoids, dist, round(Int, maxiter), tol, display_level(display)) end """ kmedoids!(dist::AbstractMatrix, medoids::Vector{Int}; [kwargs...]) -> KmedoidsResult Update the current cluster `medoids` using the `dist` matrix. The `medoids` field of the returned `KmedoidsResult` points to the same array as `medoids` argument. See [`kmedoids`](@ref) for the description of optional `kwargs`. """ function kmedoids!(dist::AbstractMatrix{T}, medoids::Vector{Int}; maxiter::Integer=_kmed_default_maxiter, tol::Real=_kmed_default_tol, display::Symbol=_kmed_default_display) where T<:Real # check arguments n = size(dist, 1) size(dist, 2) == n || throw(ArgumentError("dist must be a square matrix ($(size(dist)) given).")) length(medoids) <= n || throw(ArgumentError("Requested number of medoids exceeds n=$n ($(length(medoids)) given).")) # invoke core algorithm _kmedoids!(medoids, dist, round(Int, maxiter), tol, display_level(display)) end #### core algorithm function _kmedoids!(medoids::Vector{Int}, # initialized medoids dist::AbstractMatrix{T}, # distance matrix maxiter::Int, # maximum number of iterations tol::Real, # tolerable change of objective displevel::Int) where T<:Real # level of display # dist[i, j] is the cost of assigning point j to the medoid i n = size(dist, 1) k = length(medoids) # prepare storage costs = Vector{T}(undef, n) counts = zeros(T, k) assignments = Vector{Int}(undef, n) groups = [Int[] for i=1:k] # initialize assignments tcost, _ = _kmed_update_assignments!(dist, medoids, assignments, groups, costs, true) # main loop t = 0 converged = false if displevel >= 2 @printf("%7s %18s %18s\n", "Iters", "objv", "objv-change") println("-----------------------------------------------------") @printf("%7d %18.6e\n", t, tcost) end while !converged && t < maxiter t += 1 # update medoids for i = 1:k medoids[i] = _find_medoid(dist, groups[i]) end # update assignments tcost_pre = tcost tcost, ch = _kmed_update_assignments!(dist, medoids, assignments, groups, costs, false) # check convergence converged = (ch == 0 || abs(tcost - tcost_pre) < tol) # display progress if displevel >= 2 @printf("%7d %18.6e %18.6e\n", t, tcost, tcost - tcost_pre) end end if displevel >= 1 if converged println("K-medoids converged with $t iterations (objv = $tcost)") else println("K-medoids terminated without convergence after $t iterations (objv = $tcost)") end end # make output counts = Int[length(g) for g in groups] KmedoidsResult{T}( medoids, assignments, costs, counts, tcost, t, converged) end # update assignments and related quantities # returns the total cost and the number of assignment changes function _kmed_update_assignments!(dist::AbstractMatrix{<:Real}, # in: (n, n) medoids::AbstractVector{Int}, # in: (k,) assignments::Vector{Int}, # out: (n,) groups::Vector{Vector{Int}}, # out: (k,) costs::AbstractVector{<:Real},# out: (n,) initial::Bool) # in n = size(dist, 1) k = length(medoids) # reset cluster groups (note: assignments are not touched yet) initial || foreach(empty!, groups) tcost = 0.0 ch = 0 for j = 1:n p = 1 # initialize the closest medoid for j mv = dist[medoids[1], j] # find the closest medoid for j @inbounds for i = 2:k m = medoids[i] v = dist[m, j] # assign if current medoid is closer or if it is j itself if (v < mv) || (m == j) (v <= mv) || throw(ArgumentError("sample #$j reassigned from medoid[$p]=#$(medoids[p]) (distance=$mv) to medoid[$i]=#$m (distance=$v); check the distance matrix correctness")) p = i mv = v end end ch += !initial && (p != assignments[j]) assignments[j] = p costs[j] = mv tcost += mv push!(groups[p], j) end return (tcost, ch) end # find medoid for a given group function _find_medoid(dist::AbstractMatrix, grp::AbstractVector{Int}) @assert !isempty(grp) p = argmin(sum(view(dist, grp, grp), dims=2)) return grp[p] end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
8911
# MCL (Markov CLustering algorithm) """ MCLResult <: ClusteringResult The output of [`mcl`](@ref) function. # Fields - `mcl_adj::AbstractMatrix`: the final MCL adjacency matrix (equilibrium state matrix if the algorithm converged), empty if `save_final_matrix` option is disabled - `assignments::Vector{Int}`: indices of the points clusters. `assignments[i]` is the index of the cluster for the ``i``-th point (``0`` if unassigned) - `counts::Vector{Int}`: the ``k``-length vector of cluster sizes - `nunassigned::Int`: the number of standalone points not assigned to any cluster - `iterations::Int`: the number of elapsed iterations - `rel_Ξ”::Float64`: the final relative Ξ” - `converged::Bool`: whether the method converged """ struct MCLResult <: ClusteringResult mcl_adj::AbstractMatrix # final MCL adjacency matrix (equilibrium state matrix if converged) assignments::Vector{Int} # point-to-cluster assignments (n) counts::Vector{Int} # number of points assigned to each cluster (k) nunassigned::Int # number of single elements not assigned to any cluster iterations::Int # number of elapsed iterations rel_Ξ”::Float64 # final relative Ξ” converged::Bool # whether the procedure converged end # Extract clusters from the final (equilibrium) MCL matrix # Return the tuple: cluster indices for each element, cluster sizes, # the number of unassigned (0 cluster index) elements (if `allow_singles` is on) # `zero_tol` is a minimal value to consider as an element-to-cluster assignment function _mcl_clusters(mcl_adj::AbstractMatrix, allow_singles::Bool, zero_tol::Float64 = 1E-20) # remove rows containing only zero elements and convert into a mask of nonzero elements el2clu_mask = mcl_adj[dropdims(sum(mcl_adj, dims=2), dims=2) .> zero_tol, :] .> zero_tol # assign cluster indexes to each node # cluster index is the index of the first TRUE in a given column _ms = mapslices(el_mask->isempty(el_mask) ? 0 : argmax(el_mask), el2clu_mask, dims=1) clu_ixs = dropdims(_ms, dims=1) clu_sizes = zeros(Int, size(el2clu_mask, 1)) unassigned_count = 0 @inbounds for clu_ix in clu_ixs (clu_ix > 0) && (clu_sizes[clu_ix] += 1) end if !allow_singles # collapse all size 1 clusters into one with index 0 @inbounds for i in eachindex(clu_ixs) clu_ix = clu_ixs[i] if clu_ix > 0 && clu_sizes[clu_ix] == 1 clu_ixs[i] = 0 clu_sizes[clu_ix] = 0 unassigned_count += 1 end end else unassigned_count = 0 end # recode clusters numbers to be in 1:N range (or 0:N if there's collapsed cluster) clu_id_map = zeros(Int, length(clu_sizes)) next_clu_ix = 0 @inbounds for i in eachindex(clu_ixs) old_clu_ix = clu_ixs[i] if old_clu_ix > 0 new_clu_ix = clu_id_map[old_clu_ix] clu_ixs[i] = new_clu_ix == 0 ? clu_id_map[old_clu_ix] = (next_clu_ix += 1) : new_clu_ix end end old_clu_sizes = clu_sizes clu_sizes = zeros(Int, next_clu_ix) for (old_clu_ix, new_clu_ix) in enumerate(clu_id_map) if new_clu_ix > 0 clu_sizes[new_clu_ix] = old_clu_sizes[old_clu_ix] end end clu_ixs, clu_sizes, unassigned_count end # adjacency matrix expansion (matrix-wise raising to a given power) kernel function _mcl_expand(src::AbstractMatrix, expansion::Number) try return src^expansion catch ex # FIXME: remove this when functionality become available in the standard library if isa(ex, MethodError) throw(ArgumentError("MCL expansion of $(typeof(src)) with expansion=$expansion not supported")) else rethrow() end end end # integral power inflation (single matrix element) _mcl_el_inflate(el::Number, inflation::Integer) = el^inflation # non-integral power inflation (single matrix element) _mcl_el_inflate(el::Number, inflation::Number) = real((el+0im)^inflation) # adjacency matrix inflation (element-wise raising to a given power) kernel function _mcl_inflate!(dest::AbstractMatrix, src::AbstractMatrix, inflation::Number) @inbounds for i in eachindex(src) dest[i] = _mcl_el_inflate(src[i], inflation) end end # adjacency matrix pruning function _mcl_prune!(mtx::AbstractMatrix, prune_tol::Number) for i in 1:size(mtx,2) c = view(mtx, :, i) ΞΈ = mean(c)*prune_tol @inbounds @simd for j in eachindex(c) c[j] = ifelse(c[j] >= ΞΈ, c[j], 0.0) end end issparse(mtx) && dropzeros!(mtx) return mtx end """ mcl(adj::AbstractMatrix; [kwargs...]) -> MCLResult Perform MCL (Markov Cluster Algorithm) clustering using ``nΓ—n`` adjacency (points similarity) matrix `adj`. # Arguments Keyword arguments to control the MCL algorithm: - `add_loops::Bool` (enabled by default): whether the edges of weight 1.0 from the node to itself should be appended to the graph - `expansion::Number` (defaults to 2): MCL *expansion* constant - `inflation::Number` (defaults to 2): MCL *inflation* constant - `save_final_matrix::Bool` (disabled by default): whether to save the final equilibrium state in the `mcl_adj` field of the result; could provide useful diagnostic if the method doesn't converge - `prune_tol::Number`: pruning threshold - `display`, `maxiter`, `tol`: see [common options](@ref common_options) # References > Stijn van Dongen, *"Graph clustering by flow simulation"*, 2001 > [Original MCL implementation](http://micans.org/mcl). """ function mcl(adj::AbstractMatrix{T}; add_loops::Bool = true, expansion::Number = 2, inflation::Number = 2, save_final_matrix::Bool = false, allow_singles::Bool = true, max_iter::Union{Integer, Nothing} = nothing, maxiter::Integer = 100, tol::Number=1.0e-5, prune_tol::Number=1.0e-5, display::Symbol=:none) where T<:Real m, n = size(adj) m == n || throw(DimensionMismatch("Square adjacency matrix expected")) # FIXME max_iter is deprecated as of 0.13.1 if max_iter !== nothing Base.depwarn("max_iter parameter is deprecated, use maxiter instead", Symbol("mcl")) maxiter = max_iter end # FIXME :verbose is deprecated as of 0.13.1 if display == :verbose Base.depwarn("display=:verbose is deprecated and will be removed in future versions, use display=:iter", Symbol("mcl")) display = :iter end disp_level = display_level(display) if add_loops @inbounds for i in 1:size(adj, 1) adj[i, i] = 1.0 end end # initialize the MCL adjacency matrix by normalized `adj` weights mcl_adj = copy(adj) # normalize in columns rmul!(mcl_adj, Diagonal(map(x -> x != 0.0 ? 1.0/x : x, dropdims(sum(mcl_adj, dims=1), dims=1)))) mcl_norm = norm(mcl_adj) if !isfinite(mcl_norm) throw(OverflowError("The norm of the input adjacency matrix is not finite")) end next_mcl_adj = similar(mcl_adj) # do MCL iterations (disp_level > 0) && @info("Starting MCL iterations...") niter = 0 converged = false rel_delta = NaN while !converged && niter < maxiter expanded = _mcl_expand(mcl_adj, expansion) _mcl_inflate!(next_mcl_adj, expanded, inflation) _mcl_prune!(next_mcl_adj, prune_tol) # normalize in columns rmul!(next_mcl_adj, Diagonal(map(x -> x != 0.0 ? 1.0/x : x, dropdims(sum(next_mcl_adj, dims=1), dims=1)))) next_mcl_norm = norm(next_mcl_adj) if !isfinite(next_mcl_norm) @warn("MCL adjacency matrix norm is not finite") break end rel_delta = euclidean(next_mcl_adj, mcl_adj)/mcl_norm (disp_level == 2) && @info("MCL iter. #$niter: rel.Ξ”=", rel_delta) (converged = rel_delta <= tol) && break # update (swap) MCL adjacency niter += 1 mcl_adj, next_mcl_adj = next_mcl_adj, mcl_adj mcl_norm = next_mcl_norm (mcl_norm < tol) && break # matrix is zero end if disp_level > 0 if converged @info "MCL converged after $niter iteration(s)" else @warn "MCL didn't converge after $niter iteration(s)" end end (disp_level > 0) && @info("Generating MCL clusters...") el2clu, clu_sizes, nunassigned = _mcl_clusters(mcl_adj, allow_singles, tol/length(mcl_adj)) return MCLResult(save_final_matrix ? mcl_adj : similar(mcl_adj, (0,0)), el2clu, clu_sizes, nunassigned, niter, rel_delta, converged) end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
1167
# Mutual Information function _mutualinfo(A::AbstractMatrix{<:Integer}, normed::Bool) N = sum(A) (N == 0.0) && return 0.0 rows = sum(A, dims=2) cols = sum(A, dims=1) entA = entropy(A) entArows = entropy(rows) entAcols = entropy(cols) hck = (entA - entAcols)/N hc = entArows/N + log(N) hk = entAcols/N + log(N) mi = hc - hck return if normed 2*mi/(hc+hk) else mi end end """ mutualinfo(a, b; normed=true) -> Float64 Compute the *mutual information* between the two clusterings of the same data points. `a` and `b` can be either [`ClusteringResult`](@ref) instances or assignments vectors (`AbstractVector{<:Integer}`). If `normed` parameter is `true` the return value is the normalized mutual information (symmetric uncertainty), see "Data Mining Practical Machine Tools and Techniques", Witten & Frank 2005. # References > Vinh, Epps, and Bailey, (2009). *Information theoretic measures for clusterings comparison*. > Proceedings of the 26th Annual International Conference on Machine Learning - ICML β€˜09. """ mutualinfo(a, b; normed::Bool=true) = _mutualinfo(counts(a, b), normed)
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
1487
""" randindex(a, b) -> NTuple{4, Float64} Compute the tuple of Rand-related indices between the clusterings `c1` and `c2`. `a` and `b` can be either [`ClusteringResult`](@ref) instances or assignments vectors (`AbstractVector{<:Integer}`). Returns a tuple of indices: - Hubert & Arabie Adjusted Rand index - Rand index (agreement probability) - Mirkin's index (disagreement probability) - Hubert's index (``P(\\mathrm{agree}) - P(\\mathrm{disagree})``) # References > Lawrence Hubert and Phipps Arabie (1985). *Comparing partitions.* > Journal of Classification 2 (1): 193-218 > Meila, Marina (2003). *Comparing Clusterings by the Variation of > Information.* Learning Theory and Kernel Machines: 173-187. > Steinley, Douglas (2004). *Properties of the Hubert-Arabie Adjusted > Rand Index.* Psychological Methods, Vol. 9, No. 3: 386-396 """ function randindex(a, b) c11, c21, c12, c22 = confusion(Float64, a, b) # Table 2 from Steinley 2004 t = c11 + c12 + c21 + c22 # total number of pairs of entities A = c11 + c22 D = c12 + c21 # expected index ERI = (c11+c12)*(c11+c21)+(c21+c22)*(c12+c22) # adjusted Rand - Hubert & Arabie 1985 ARI = D == 0 ? 1.0 : (t*A - ERI)/(abs2(t) - ERI) # (9) from Steinley 2004 RI = A/t # Rand 1971 # Probability of agreement MI = D/t # Mirkin 1970 # p(disagreement) HI = (A-D)/t # Hubert 1977 # p(agree)-p(disagree) return (ARI, RI, MI, HI) end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
8456
# Initialization algorithms # # Each algorithm is represented by a subtype of SeedingAlgorithm # # Let alg be an instance of such an algorithm, then it should # support the following usage: # # initseeds!(iseeds, alg, X; kwargs...) # initseeds_by_costs!(iseeds, alg, costs; kwargs...) # # Here: # - iseeds: a vector of resultant indexes of the chosen seeds # - alg: the seeding algorithm instance # - X: the data matrix, each column being a data point # - costs: pre-computed pairwise cost matrix. # - kwargs: additional kw-arguments, i.e. `rng` # # This function returns iseeds # """ SeedingAlgorithm Base type for all seeding algorithms. Each seeding algorithm should implement the two functions: [`initseeds!`](@ref) and [`initseeds_by_costs!`](@ref). """ abstract type SeedingAlgorithm end """ initseeds(alg::Union{SeedingAlgorithm, Symbol}, X::AbstractMatrix, k::Integer) -> Vector{Int} Select `k` seeds from a ``dΓ—n`` data matrix `X` using the `alg` algorithm. `alg` could be either an instance of [`SeedingAlgorithm`](@ref) or a symbolic name of the algorithm. Returns the vector of `k` seed indices. """ initseeds(alg::SeedingAlgorithm, X::AbstractMatrix{<:Real}, k::Integer; kwargs...) = initseeds!(Vector{Int}(undef, k), alg, X; kwargs...) """ initseeds_by_costs(alg::Union{SeedingAlgorithm, Symbol}, costs::AbstractMatrix, k::Integer) -> Vector{Int} Select `k` seeds from the ``nΓ—n`` `costs` matrix using algorithm `alg`. Here, `costs[i, j]` is the cost of assigning points `i`` and ``j`` to the same cluster. One may, for example, use the squared Euclidean distance between the points as the cost. Returns the vector of `k` seed indices. """ initseeds_by_costs(alg::SeedingAlgorithm, costs::AbstractMatrix{<:Real}, k::Integer; kwargs...) = initseeds_by_costs!(Vector{Int}(undef, k), alg, costs; kwargs...) seeding_algorithm(s::Symbol) = s == :rand ? RandSeedAlg() : s == :kmpp ? KmppAlg() : s == :kmcen ? KmCentralityAlg() : throw(ArgumentError("Unknown seeding algorithm $s")) function check_seeding_args(n::Integer, k::Integer) k >= 1 || throw(ArgumentError("The number of seeds ($k) must be positive.")) k <= n || throw(ArgumentError("Cannot select more seeds ($k) than data points ($n).")) end check_seeding_args(X::AbstractMatrix, iseeds::AbstractVector) = check_seeding_args(size(X, 2), length(iseeds)) initseeds(algname::Symbol, X::AbstractMatrix{<:Real}, k::Integer; kwargs...) = initseeds(seeding_algorithm(algname), X, k; kwargs...)::Vector{Int} initseeds_by_costs(algname::Symbol, costs::AbstractMatrix{<:Real}, k::Integer; kwargs...) = initseeds_by_costs(seeding_algorithm(algname), costs, k; kwargs...) # use specified vector of seeds function initseeds(iseeds::AbstractVector{<:Integer}, X::AbstractMatrix{<:Real}, k::Integer; kwargs...) length(iseeds) == k || throw(ArgumentError("The length of seeds vector ($(length(iseeds))) differs from the number of seeds requested ($k)")) check_seeding_args(X, iseeds) n = size(X, 2) # check that seed indices are fine for (i, seed) in enumerate(iseeds) (1 <= seed <= n) || throw(ArgumentError("Seed #$i refers to an incorrect data point ($seed)")) end # NOTE no duplicate checks are done, should we? convert(Vector{Int}, iseeds) end initseeds_by_costs(iseeds::AbstractVector{<:Integer}, costs::AbstractMatrix{<:Real}, k::Integer; kwargs...) = initseeds(iseeds, costs, k; kwargs...) # NOTE: passing costs as X, but should be fine since only size(X, 2) is used function copyseeds!(S::AbstractMatrix{<:AbstractFloat}, X::AbstractMatrix{<:Real}, iseeds::AbstractVector) d, n = size(X) k = length(iseeds) size(S) == (d, k) || throw(DimensionMismatch("Inconsistent seeds matrix dimensions: $((d, k)) expected, $(size(S)) given.")) return copyto!(S, view(X, :, iseeds)) end """ RandSeedAlg <: SeedingAlgorithm Random seeding (`:rand`). Chooses an arbitrary subset of ``k`` data points as cluster seeds. """ struct RandSeedAlg <: SeedingAlgorithm end """ initseeds!(iseeds::AbstractVector{Int}, alg::SeedingAlgorithm, X::AbstractMatrix) -> iseeds Initialize `iseeds` with the indices of cluster seeds for the `X` data matrix using the `alg` seeding algorithm. """ function initseeds!(iseeds::AbstractVector{<:Integer}, alg::RandSeedAlg, X::AbstractMatrix{<:Real}; rng::AbstractRNG=Random.GLOBAL_RNG) check_seeding_args(X, iseeds) sample!(rng, 1:size(X, 2), iseeds; replace=false) end """ initseeds_by_costs!(iseeds::AbstractVector{Int}, alg::SeedingAlgorithm, costs::AbstractMatrix) -> iseeds Initialize `iseeds` with the indices of cluster seeds for the `costs` matrix using the `alg` seeding algorithm. Here, `costs[i, j]` is the cost of assigning points ``i`` and ``j`` to the same cluster. One may, for example, use the squared Euclidean distance between the points as the cost. """ function initseeds_by_costs!(iseeds::AbstractVector{<:Integer}, alg::RandSeedAlg, X::AbstractMatrix{<:Real}; rng::AbstractRNG=Random.GLOBAL_RNG) check_seeding_args(X, iseeds) sample!(rng, 1:size(X,2), iseeds; replace=false) end """ KmppAlg <: SeedingAlgorithm Kmeans++ seeding (`:kmpp`). Chooses the seeds sequentially. The probability of a point to be chosen is proportional to the minimum cost of assigning it to the existing seeds. # References > D. Arthur and S. Vassilvitskii (2007). > *k-means++: the advantages of careful seeding.* > 18th Annual ACM-SIAM symposium on Discrete algorithms, 2007. """ struct KmppAlg <: SeedingAlgorithm end function initseeds!(iseeds::AbstractVector{<:Integer}, alg::KmppAlg, X::AbstractMatrix{<:Real}, metric::PreMetric = SqEuclidean(); rng::AbstractRNG=Random.GLOBAL_RNG) n = size(X, 2) k = length(iseeds) check_seeding_args(n, k) # randomly pick the first center p = rand(rng, 1:n) iseeds[1] = p if k > 1 mincosts = colwise(metric, X, view(X, :, p)) mincosts[p] = 0 # pick remaining (with a chance proportional to mincosts) tmpcosts = zeros(n) for j = 2:k p = wsample(rng, 1:n, mincosts) iseeds[j] = p # update mincosts colwise!(metric, tmpcosts, X, view(X, :, p)) mincosts .= min.(mincosts, tmpcosts) mincosts[p] = 0 end end return iseeds end function initseeds_by_costs!(iseeds::AbstractVector{<:Integer}, alg::KmppAlg, costs::AbstractMatrix{<:Real}; rng::AbstractRNG=Random.GLOBAL_RNG) n = size(costs, 1) k = length(iseeds) check_seeding_args(n, k) # randomly pick the first center p = rand(rng, 1:n) iseeds[1] = p if k > 1 mincosts = costs[:, p] mincosts[p] = 0 # pick remaining (with a chance proportional to mincosts) for j = 2:k p = wsample(rng, 1:n, mincosts) iseeds[j] = p # update mincosts mincosts .= min.(mincosts, view(costs, :, p)) mincosts[p] = 0 end end return iseeds end """ KmCentralityAlg <: SeedingAlgorithm K-medoids initialization based on centrality (`:kmcen`). Choose the ``k`` points with the highest *centrality* as seeds. # References > Hae-Sang Park and Chi-Hyuck Jun. > *A simple and fast algorithm for K-medoids clustering.* > doi:10.1016/j.eswa.2008.01.039 """ struct KmCentralityAlg <: SeedingAlgorithm end function initseeds_by_costs!(iseeds::AbstractVector{<:Integer}, alg::KmCentralityAlg, costs::AbstractMatrix{<:Real}; kwargs...) n = size(costs, 1) k = length(iseeds) check_seeding_args(n, k) # scores[j] = \sum_j costs[i,j] / (\sum_{j'} costs[i,j']) scores = costs'vec(mapslices(inv∘sum, costs, dims=2)) # lower score indicates better seeds copyto!(iseeds, 1, sortperm(scores), 1, k) return iseeds end initseeds!(iseeds::AbstractVector{<:Integer}, alg::KmCentralityAlg, X::AbstractMatrix{<:Real}, metric::PreMetric = SqEuclidean(); kwargs...) = initseeds_by_costs!(iseeds, alg, pairwise(metric, X, dims=2); kwargs...)
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
4851
# Silhouette # compute silhouette scores for each point given a matrix of cluster-to-point distances function silhouettes_scores(clu_to_pt::AbstractMatrix{<:Real}, assignments::AbstractVector{<:Integer}, clu_sizes::AbstractVector{<:Integer}) n = length(assignments) @assert size(clu_to_pt) == (length(clu_sizes), n) # compute a and b # a: average distance w.r.t. the assigned cluster # b: the minimum average distance w.r.t. other cluster a = similar(clu_to_pt, n) b = similar(clu_to_pt, n) nclusters = length(clu_sizes) for j in 1:n l = assignments[j] a[j] = clu_to_pt[l, j] v = typemax(eltype(b)) @inbounds for i = 1:nclusters clu_sizes[i] == 0 && continue # skip empty clusters rij = clu_to_pt[i, j] if (i != l) && (rij < v) v = rij end end b[j] = v end # compute silhouette score sil = a # reuse the memory of a for sil for j = 1:n if clu_sizes[assignments[j]] == 1 sil[j] = 0 else #If both a[i] and b[i] are equal to 0 or Inf, silhouettes is defined as 0 @inbounds sil[j] = a[j] < b[j] ? 1 - a[j]/b[j] : a[j] > b[j] ? b[j]/a[j] - 1 : zero(eltype(sil)) end end return sil end # calculate silhouette scores (single batch) silhouettes_batch(dists::ClusterDistances, assignments::AbstractVector{<:Integer}, points::Union{AbstractMatrix, Nothing}, indices::AbstractVector{<:Integer}) = silhouettes_scores(meandistances(dists, assignments, points, indices), assignments, cluster_sizes(dists)) # batch-calculate silhouette scores (splitting the points into chunks of batch_size size) function silhouettes(dists::ClusterDistances, assignments::AbstractVector{<:Integer}, points::Union{AbstractMatrix, Nothing}, batch_size::Union{Integer, Nothing} = nothing) n = length(assignments) ((batch_size === nothing) || (n <= batch_size)) && return silhouettes_batch(dists, assignments, points, eachindex(assignments)) return mapreduce(vcat, 1:batch_size:n) do batch_start batch_ixs = batch_start:min(batch_start + batch_size - 1, n) # copy points/assignments to speed up matrix and indexing operations silhouettes_batch(dists, assignments[batch_ixs], points !== nothing ? points[:, batch_ixs] : nothing, batch_ixs) end end """ silhouettes(assignments::Union{AbstractVector, ClusteringResult}, point_dists::Matrix) -> Vector{Float64} silhouettes(assignments::Union{AbstractVector, ClusteringResult}, points::Matrix; metric::SemiMetric, [batch_size::Integer]) -> Vector{Float64} Compute *silhouette* values for individual points w.r.t. given clustering. Returns the ``n``-length vector of silhouette values for each individual point. # Arguments - `assignments::Union{AbstractVector{Int}, ClusteringResult}`: the vector of point assignments (cluster indices) - `points::AbstractMatrix`: if metric is nothing it is an ``nΓ—n`` matrix of pairwise distances between the points, otherwise it is an ``dΓ—n`` matrix of `d` dimensional clustered data points. - `metric::Union{SemiMetric, Nothing}`: an instance of Distances Metric object or nothing, indicating the distance metric used for calculating point distances. - `batch_size::Union{Integer, Nothing}`: if integer is given, calculate silhouettes in batches of `batch_size` points each, throws `DimensionMismatch` if batched calculation is not supported by given `metric`. # References > Peter J. Rousseeuw (1987). *Silhouettes: a Graphical Aid to the > Interpretation and Validation of Cluster Analysis*. Computational and > Applied Mathematics. 20: 53–65. > Marco Gaido (2023). Distributed Silhouette Algorithm: Evaluating Clustering on Big Data """ function silhouettes(assignments::AbstractVector{<:Integer}, points::AbstractMatrix; metric::Union{SemiMetric, Nothing} = nothing, batch_size::Union{Integer, Nothing} = nothing) nclusters = maximum(assignments) nclusters >= 2 || throw(ArgumentError("silhouettes() not defined for the degenerated clustering with a single cluster.")) check_assignments(assignments, nclusters) return silhouettes(ClusterDistances(metric, assignments, points, batch_size), assignments, points, batch_size) end silhouettes(R::ClusteringResult, points::AbstractMatrix; kwargs...) = silhouettes(assignments(R), points; kwargs...)
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
2078
# Common utilities ##### common types """ ClusteringResult Base type for the output of clustering algorithm. """ abstract type ClusteringResult end # vector of cluster indices for each clustered point ClusterAssignments = AbstractVector{<:Integer} ClusteringResultOrAssignments = Union{ClusteringResult, ClusterAssignments} # generic functions """ nclusters(R::ClusteringResult) -> Int Get the number of clusters. """ nclusters(R::ClusteringResult) = length(R.counts) """ counts(R::ClusteringResult) -> Vector{Int} Get the vector of cluster sizes. `counts(R)[k]` is the number of points assigned to the ``k``-th cluster. """ counts(R::ClusteringResult) = R.counts """ wcounts(R::ClusteringResult) -> Vector{Float64} wcounts(R::FuzzyCMeansResult) -> Vector{Float64} Get the weighted cluster sizes as the sum of weights of points assigned to each cluster. For non-weighted clusterings assumes the weight of every data point is 1.0, so the result is equivalent to `convert(Vector{Float64}, counts(R))`. """ wcounts(R::ClusteringResult) = convert(Vector{Float64}, counts(R)) """ assignments(R::ClusteringResult) -> Vector{Int} Get the vector of cluster indices for each point. `assignments(R)[i]` is the index of the cluster to which the ``i``-th point is assigned. """ assignments(R::ClusteringResult) = R.assignments assignments(A::ClusterAssignments) = A ##### convert display symbol to disp level const DisplayLevels = Dict(:none => 0, :final => 1, :iter => 2) display_level(s::Symbol) = get(DisplayLevels, s) do valid_vals = string.(":", first.(sort!(collect(pairs(DisplayLevels)), by=last))) throw(ArgumentError("Invalid option display=:$s ($(join(valid_vals, ", ", ", or ")) expected)")) end function check_assignments(assignments::AbstractVector{<:Integer}, nclusters::Union{Integer, Nothing}) nclu = nclusters === nothing ? maximum(assignments) : nclusters for (j, c) in enumerate(assignments) all(1 <= c <= nclu) || throw(ArgumentError("Bad assignments[$j]=$c: should be in 1:$nclu range.")) end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
855
# Variation of Information """ varinfo(a, b) -> Float64 Compute the *variation of information* between the two clusterings of the same data points. `a` and `b` can be either [`ClusteringResult`](@ref) instances or assignments vectors (`AbstractVector{<:Integer}`). # References > Meila, Marina (2003). *Comparing Clusterings by the Variation of > Information.* Learning Theory and Kernel Machines: 173–187. """ function varinfo(a, b) C = counts(a, b) isempty(C) && return 0.0 countsA = a isa ClusteringResult ? counts(a) : sum(C, dims=2) countsB = b isa ClusteringResult ? counts(b) : sum(C, dims=1) I = 0.0 @inbounds for (i, ci) in enumerate(countsA), (j, cj) in enumerate(countsB) cij = C[i, j] if cij > 0.0 I += cij * log(cij*cij / (ci*cj)) end end return -I/sum(countsA) end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
1216
# V-measure of contingency table function _vmeasure(A::AbstractMatrix{<:Integer}; Ξ²::Real) (Ξ² >= 0) || throw(ArgumentError("Ξ² should be nonnegative")) N = sum(A) (N == 0.0) && return 0.0 entA = entropy(A) entArows = entropy(sum(A, dims=2)) entAcols = entropy(sum(A, dims=1)) hck = (entA - entAcols)/N hkc = (entA - entArows)/N hc = entArows/N + log(N) hk = entAcols/N + log(N) # Homogeneity h = hc == 0.0 ? 1.0 : 1.0 - hck/hc # Completeness c = hk == 0.0 ? 1.0 : 1.0 - hkc/hk # V-measure V_Ξ² = (1 + Ξ²)*h*c/(Ξ²*h + c) return V_Ξ² end """ vmeasure(a, b; [Ξ² = 1.0]) -> Float64 V-measure between the two clusterings. `a` and `b` can be either [`ClusteringResult`](@ref) instances or assignments vectors (`AbstractVector{<:Integer}`). The `Ξ²` parameter defines trade-off between _homogeneity_ and _completeness_: * if ``Ξ² > 1``, _completeness_ is weighted more strongly, * if ``Ξ² < 1``, _homogeneity_ is weighted more strongly. # References > Andrew Rosenberg and Julia Hirschberg, 2007. *V-Measure: A conditional > entropy-based external cluster evaluation measure* """ vmeasure(a, b; Ξ²::Real = 1.0) = _vmeasure(counts(a, b), Ξ²=float(Ξ²))
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
4819
# simple program to test affinity propagation using Test using Distances using Clustering using LinearAlgebra using Random, StableRNGs using Statistics include("test_helpers.jl") @testset "affinityprop() (affinity propagation)" begin @testset "Argument checks" begin @test_throws ArgumentError affinityprop(randn(2, 3)) @test_throws ArgumentError affinityprop(randn(1, 1)) @test_throws ArgumentError affinityprop(randn(2, 2), tol=0.0) @test_throws ArgumentError affinityprop(randn(2, 2), damp=-0.1) @test_throws ArgumentError affinityprop(randn(2, 2), damp=1.0) x = randn(2, 4) S = -pairwise(Euclidean(), x, x, dims=2) @test affinityprop(S, damp=0.5, tol=0.5) isa AffinityPropResult for disp in keys(Clustering.DisplayLevels) @test affinityprop(S, tol=0.1, display=disp) isa AffinityPropResult end end rng = StableRNG(34568) d = 10 n = 500 x = rand(rng, d, n) S = -pairwise(Euclidean(), x, x, dims=2) # set diagonal value to median value S = S - diagm(0 => diag(S)) + median(S)*I R = affinityprop(S) @test isa(R, AffinityPropResult) k = length(R.exemplars) @test 0 < k < n @test length(R.assignments) == n @test all(R.assignments .>= 1) && all(R.assignments .<= k) @test all(R.assignments[R.exemplars] .== collect(1:k)) @test length(R.counts) == k @test sum(R.counts) == n for i = 1:k @test R.counts[i] == count(==(i), R.assignments) end @testset "Support for arrays other than Matrix{T}" begin @testset "$(typeof(M))" for M in equivalent_matrices(S) R2 = affinityprop(M) @test R2.assignments == R.assignments end end #= compare with python result the reference assignments were computed using python sklearn: ```julia using PyCall @pyimport sklearn.cluster as cl af = cl.AffinityPropagation(affinity="precomputed")[:fit]( S ) ref_assignments = af[:labels_] .+ 1 ``` =# ref_assignments = [7, 30, 53, 30, 43, 55, 19, 31, 23, 16, 31, 31, 1, 14, 47, 45, 54, 48, 8, 55, 39, 45, 14, 47, 24, 27, 28, 20, 9, 8, 3, 32, 17, 16, 54, 50, 2, 46, 32, 30, 21, 52, 19, 55, 2, 47, 49, 3, 2, 45, 43, 27, 51, 4, 16, 46, 55, 11, 35, 1, 56, 40, 45, 33, 26, 26, 51, 13, 18, 4, 55, 19, 3, 52, 39, 5, 6, 43, 21, 16, 20, 34, 16, 9, 19, 3, 30, 48, 43, 30, 1, 17, 26, 30, 6, 27, 18, 2, 40, 3, 53, 7, 37, 7, 4, 21, 14, 49, 4, 39, 29, 34, 23, 22, 41, 44, 48, 39, 7, 2, 1, 23, 41, 8, 53, 31, 37, 54, 28, 2, 17, 9, 1, 10, 11, 34, 14, 8, 39, 55, 43, 37, 24, 15, 53, 4, 44, 40, 12, 51, 42, 50, 13, 15, 5, 34, 27, 2, 12, 14, 48, 10, 49, 4, 36, 53, 36, 24, 22, 36, 45, 22, 52, 19, 31, 22, 46, 10, 25, 42, 15, 25, 53, 16, 5, 25, 14, 51, 19, 50, 32, 54, 4, 45, 17, 56, 18, 41, 23, 39, 7, 53, 56, 30, 37, 12, 16, 19, 20, 20, 42, 39, 16, 45, 37, 17, 52, 15, 21, 6, 33, 41, 1, 34, 22, 19, 54, 16, 44, 31, 23, 11, 7, 24, 11, 53, 49, 55, 46, 43, 25, 51, 55, 25, 47, 16, 46, 26, 55, 14, 53, 3, 44, 34, 26, 19, 49, 35, 3, 34, 32, 27, 42, 28, 42, 42, 54, 2, 29, 21, 20, 25, 19, 9, 50, 3, 30, 14, 32, 43, 34, 12, 5, 6, 3, 50, 27, 50, 52, 51, 46, 39, 14, 12, 30, 32, 19, 19, 43, 19, 25, 40, 31, 25, 52, 30, 37, 27, 20, 8, 22, 39, 55, 25, 21, 31, 17, 16, 15, 31, 29, 17, 5, 32, 38, 4, 16, 52, 48, 18, 17, 41, 4, 23, 3, 29, 44, 50, 40, 52, 29, 9, 36, 15, 33, 13, 52, 20, 14, 38, 30, 24, 34, 40, 41, 21, 22, 24, 20, 15, 35, 36, 47, 45, 45, 23, 37, 38, 19, 26, 16, 39, 16, 31, 28, 27, 40, 41, 30, 17, 3, 14, 52, 31, 38, 28, 37, 34, 44, 53, 32, 14, 5, 23, 42, 43, 44, 22, 55, 12, 39, 20, 37, 28, 19, 33, 54, 33, 1, 10, 45, 6, 46, 47, 50, 29, 38, 26, 48, 20, 49, 32, 6, 22, 39, 34, 27, 25, 53, 28, 50, 41, 43, 49, 3, 51, 10, 27, 51, 28, 23, 44, 24, 20, 4, 28, 29, 11, 33, 52, 19, 4, 9, 14, 36, 34, 13, 31, 27, 41, 47, 35, 37, 53, 6, 56, 53, 3, 39, 7, 3, 29, 26, 1, 32, 3, 24, 38, 14, 6, 54, 23, 27, 17, 56, 29, 35, 46, 31, 46, 55, 56, 20, 32, 54, 46, 48, 26, 1, 48] @test randindex(R.assignments, ref_assignments)[2] == 1 end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
7132
using Test using Clustering, Distances @testset "clustering_quality()" begin # test data with 12 2D points and 4 clusters Y = [-2 2 2 3 2 1 2 -2 -2 -1 -2 -3 4 4 1 0 -1 0 -4 -4 1 0 -1 0] # cluster centers C = [0 2 0 -2 4 0 -4 0] # point-to-cluster assignments A = [1, 1, 2, 2, 2, 2, 3, 3, 4, 4, 4, 4] # convert A to fuzzy clusters weights W = zeros(Int, (size(Y, 2), size(C, 2))) for (i, c) in enumerate(A) W[i, c] = 1 end # fuzzy clustering with 4 clusters W2 = [ 1 0 0 0 1 0 0 0 0 1/2 0 1/2 0 1/2 0 1/2 0 1/2 0 1/2 0 1/2 0 1/2 0 0 1 0 0 0 1 0 0 1/2 0 1/2 0 1/2 0 1/2 0 1/2 0 1/2 0 1/2 0 1/2 ] # mock hard and fuzzy clusterings for testing interface; only C, W and A arguments are actually used A_kmeans = KmeansResult(Float64.(C), A, ones(12), [4, 4, 4], ones(4), 42., 42, true) W_cmeans = FuzzyCMeansResult(Float64.(C), Float64.(W), 42, true) W2_cmeans = FuzzyCMeansResult(Float64.(C), Float64.(W2), 42, true) @testset "input checks" begin @test_throws ArgumentError clustering_quality(zeros(2,2), zeros(2,3), [1, 2], quality_index = :calinski_harabasz) @test_throws DimensionMismatch clustering_quality(zeros(2,2), zeros(3,2), [1, 2], quality_index = :calinski_harabasz) @test_throws ArgumentError clustering_quality(zeros(2,2), zeros(2,1), [1, ], quality_index = :calinski_harabasz) @test_throws ArgumentError clustering_quality(zeros(2,2), zeros(2,2), [1, 2], quality_index = :calinski_harabasz) @test_throws ArgumentError clustering_quality(zeros(0,0), zeros(0,0), zeros(Int,0); quality_index = :calinski_harabasz) @test_throws ArgumentError clustering_quality(zeros(0,0), zeros(0,0), zeros(0,0); quality_index = :calinski_harabasz, fuzziness = 2) @test_throws DimensionMismatch clustering_quality([1,2,3], zeros(2,2), quality_index = :dunn) # wrong quality index @test_throws ArgumentError clustering_quality(Y, C, A; quality_index = :nonexistent_index) @test_throws ArgumentError clustering_quality(Y, C, W; quality_index = :nonexistent_index, fuzziness = 2) @test_throws ArgumentError clustering_quality(Y, A; quality_index = :nonexistent_index) end @testset "correct quality index values" begin @testset "calinski_harabasz" begin @test clustering_quality(Y, C, A; quality_index = :calinski_harabasz, metric = Euclidean()) β‰ˆ (32/3) / (16/8) @test clustering_quality(Y, A_kmeans; quality_index = :calinski_harabasz, metric = Euclidean()) β‰ˆ (32/3) / (16/8) # requires centers @test_throws ArgumentError clustering_quality(A_kmeans, pairwise(Euclidean(), Y, dims=2); quality_index = :calinski_harabasz) @test clustering_quality(Y, C, W; quality_index = :calinski_harabasz, fuzziness = 2, metric = Euclidean()) β‰ˆ (32/3) / (16/8) @test clustering_quality(Y, W_cmeans; quality_index = :calinski_harabasz, fuzziness = 2, metric = Euclidean()) β‰ˆ (32/3) / (16/8) @test_throws MethodError clustering_quality(W_cmeans, pairwise(Euclidean(), Y, dims=2); quality_index = :calinski_harabasz, fuzziness = 2) β‰ˆ (32/3) / (16/8) @test clustering_quality(Y, C, W2; quality_index = :calinski_harabasz, fuzziness = 2, metric = Euclidean()) β‰ˆ 8/3 * ( 24 ) / (14+sqrt(17)) @test clustering_quality(Y, W2_cmeans; quality_index = :calinski_harabasz, fuzziness = 2, metric = Euclidean()) β‰ˆ 8/3 * ( 24 ) / (14+sqrt(17)) @test_throws MethodError clustering_quality(W2_cmeans, pairwise(Euclidean(), Y, dims=2); quality_index = :calinski_harabasz, fuzziness = 2) end @testset "davies_bouldin" begin @test clustering_quality(Y, C, A; quality_index = :davies_bouldin, metric = Euclidean()) β‰ˆ 3/sqrt(20) @test clustering_quality(Y, A_kmeans; quality_index = :davies_bouldin, metric = Euclidean()) β‰ˆ 3/sqrt(20) # requires centers @test_throws ArgumentError clustering_quality(A_kmeans, pairwise(Euclidean(), Y, dims=2); quality_index = :davies_bouldin) β‰ˆ 3/sqrt(20) # fuzziness not supported @test_throws ArgumentError clustering_quality(Y, W_cmeans; quality_index = :davies_bouldin, fuzziness = 2) end @testset "dunn" begin @test clustering_quality(Y, C, A; quality_index = :dunn, metric = Euclidean()) β‰ˆ 1/2 @test clustering_quality(Y, A_kmeans; quality_index = :dunn, metric = Euclidean()) β‰ˆ 1/2 @test clustering_quality(A_kmeans, pairwise(Euclidean(), Y, dims=2); quality_index = :dunn) β‰ˆ 1/2 # fuzziness not supported @test_throws ArgumentError clustering_quality(Y, W_cmeans; quality_index = :dunn, fuzziness = 2) end @testset "xie_beni" begin @test clustering_quality(Y, C, A; quality_index = :xie_beni, metric = Euclidean()) β‰ˆ 1/3 @test clustering_quality(Y, C, W; quality_index = :xie_beni, fuzziness = 2, metric = Euclidean()) β‰ˆ 1/3 @test clustering_quality(Y, W_cmeans; quality_index = :xie_beni, fuzziness = 2, metric = Euclidean()) β‰ˆ 1/3 @test clustering_quality(Y, C, W2; quality_index = :xie_beni, fuzziness = 2, metric = Euclidean()) β‰ˆ (14+sqrt(17)) / (12 * 4) @test clustering_quality(Y, W2_cmeans; quality_index = :xie_beni, fuzziness = 2, metric = Euclidean()) β‰ˆ (14+sqrt(17)) / (12 * 4) end @testset "silhouettes" begin avg_silh = 1 - 1/12*( # average over silhouettes 1 - a_i * 1/b_i + 4 * 16 /(3+2sqrt(17)+5) # 4 points in clusters 1 and 3 + 4 * (2sqrt(2)+2)/3 * 1/4 # 4 points in clusters 2 and 4, top + bottom + 2 * (2sqrt(2)+2)/3 * 4/(4+2sqrt(26)+6) # 2 points clusters 2 and 4, left and right + 2 * (2sqrt(2)+2)/3 * 4/(2+2sqrt(10)+4) # 2 points clusters 2 and 4, center ) @test clustering_quality(Y, A; quality_index = :silhouettes, metric = Euclidean()) β‰ˆ avg_silh @test clustering_quality(Y, A_kmeans; quality_index = :silhouettes, metric = Euclidean()) β‰ˆ avg_silh @test clustering_quality(A_kmeans, pairwise(Euclidean(), Y, dims=2); quality_index = :silhouettes) β‰ˆ avg_silh # fuzziness not supported @test_throws ArgumentError clustering_quality(Y, W_cmeans; quality_index = :silhouettes, fuzziness = 2) end end @testset "empty clusters" begin # degenerated clustering, no 4th cluster degenC = [0 2 0 -2 -2 4 0 -4 0 0] degenA = [1, 1, 2, 2, 2, 2, 3, 3, 5, 5, 5, 5] @test_logs((:warn, "Detected empty cluster(s): #4. clustering_quality() results might be incorrect."), clustering_quality(Y, degenC, degenA; quality_index = :calinski_harabasz)) @test clustering_quality(Y, degenC, degenA; quality_index = :calinski_harabasz, metric = Euclidean()) β‰ˆ (32/3) / (16/8) end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
1638
# Test confusion matrix using Test using Clustering @testset "confusion() (Confusion matrix)" begin @testset "small size tests" begin @test confusion([0,0,0], [0,0,0]) == [3 0; 0 0] @test confusion([0,0,1], [0,0,0]) == [1 0; 2 0] @test confusion([0,1,1], [0,0,0]) == [1 0; 2 0] @test confusion([1,1,1], [0,0,0]) == [3 0; 0 0] @test confusion([0,0,0], [0,0,1]) == [1 2; 0 0] @test confusion([0,0,1], [0,0,1]) == [1 0; 0 2] @test confusion([0,1,1], [0,0,1]) == [0 1; 1 1] @test confusion([1,1,1], [0,0,1]) == [1 2; 0 0] @test confusion([0,0,0], [0,1,1]) == [1 2; 0 0] @test confusion([0,0,1], [0,1,1]) == [0 1; 1 1] @test confusion([0,1,1], [0,1,1]) == [1 0; 0 2] @test confusion([1,1,1], [0,1,1]) == [1 2; 0 0] @test confusion([0,0,0], [1,1,1]) == [3 0; 0 0] @test confusion([0,0,1], [1,1,1]) == [1 0; 2 0] @test confusion([0,1,1], [1,1,1]) == [1 0; 2 0] @test confusion([1,1,1], [1,1,1]) == [3 0; 0 0] end @testset "specifying element type" begin @test @inferred(confusion(Int, [1,1,1], [1,1,1])) isa Matrix{Int} @test @inferred(confusion(Float64, [1,1,1], [1,1,1])) isa Matrix{Float64} end @testset "comparing 2 k-means clusterings" begin m = 3 n = 100 k = 1 x = rand(m, n) # non-weighted r1 = kmeans(x, k; maxiter=5) r2 = kmeans(x, k; maxiter=5) C = confusion(r1, r2) @test C == [n*(n-1)/2 0; 0 0] C = confusion(Float64, r1, r2) @test C == [n*(n-1)/2 0; 0 0] end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
2315
# Test cross-tabulation # simple ClusteringResult subtype for testing struct SimpleCluRes{T} <: ClusteringResult assignments::Vector{T} counts::Vector{Int} SimpleCluRes(assignments::AbstractVector{T}) where {T<:Integer} = new{T}(assignments, isempty(assignments) ? Vector{Int}() : counts(assignments, (1:maximum(assignments)))) end @testset "counts() (contingency matrix)" begin # Clustering's counts() @test counts(SimpleCluRes(Int[]), Int[]) == Matrix{Int}(undef, 0, 0) @test_throws DimensionMismatch counts(SimpleCluRes([1]), Int[]) @test_throws DimensionMismatch counts(SimpleCluRes([1]), [2, 1]) #@test_throws ArgumentError counts([1, 1], [0, 1]) # doesn't throw as StatsBase.counts() is called #@test_throws ArgumentError counts([1, -1], [2, 1]) # doesn't throw as StatsBase.counts() is called @test_throws ArgumentError counts(SimpleCluRes([1, 1]), [0, 1]) @test_throws ArgumentError counts(SimpleCluRes([1, -1]), [2, 1]) # supports different Integer types @test counts(SimpleCluRes(Int32[1, 2]), Int16[1, 2]) == Matrix{Int}(I, 2, 2) @test counts([1, 2, 3], [1, 2, 3]) == Matrix{Int}(I, 3, 3) @test counts([2, 3, 1], [1, 2, 3]) == [0 0 1; 1 0 0; 0 1 0] # 3rd cluster in A missing @test counts([2, 4, 1], [1, 2, 3]) == [0 0 1; 1 0 0; 0 0 0; 0 1 0] @test counts(SimpleCluRes([2, 4, 1]), [1, 2, 3]) == [0 0 1; 1 0 0; 0 0 0; 0 1 0] # 1st cluster in B missing (StatsBase.counts() and Clustering.counts() give different results) @test counts([2, 3, 1], [2, 2, 3]) == [0 1; 1 0; 1 0] @test counts(SimpleCluRes([2, 3, 1]), [2, 2, 3]) == [0 0 1; 0 1 0; 0 1 0] @test counts([2, 2, 1], [1, 1, 1]) == reshape([1; 2], (2, 1)) @test counts([1, 1, 1], [2, 2, 1]) == [1 2] @testset "with ClusteringResult objects" begin Random.seed!(34568) X = rand(3, 25) clu3 = kmeans(X, 3) clu5 = kmeans(X, 5) Y = rand(3, 20) cluY = kmeans(Y, 3) @test_throws DimensionMismatch counts(clu3, cluY) @test counts(clu3, clu5) == counts(clu5, clu3)' @test size(counts(clu3, clu5)) == (3, 5) @test sum(counts(clu3, clu5)) == 25 @test counts(clu3, clu5) == counts(clu3, assignments(clu5)) @test counts(clu3, clu5) == counts(assignments(clu3), clu5) @test counts(clu3, clu5) == counts(assignments(clu3), assignments(clu5)) end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
12393
using Test using Clustering using Distances include("test_helpers.jl") @testset "dbscan() (DBSCAN clustering)" begin @testset "Argument checks" begin Random.seed!(34568) @test_throws ArgumentError dbscan(randn(2, 3), 1.0, metric=nothing, min_neighbors=1) @test_throws ArgumentError dbscan(randn(1, 1), 1.0, metric=nothing, min_neighbors=1) @test_throws ArgumentError dbscan(randn(2, 2), -1.0, metric=nothing, min_neighbors=1) @test_throws ArgumentError dbscan(randn(2, 2), 1.0, metric=nothing, min_neighbors=0) @test @inferred(dbscan(randn(2, 2), 0.5, metric=nothing, min_neighbors=1)) isa DbscanResult end @testset "clustering synthetic data with 3 clusters" begin Random.seed!(34568) n = 200 X = hcat(randn(2, n) .+ [0., 5.], randn(2, n) .+ [-5., 0.], randn(2, n) .+ [5., 0.]) # cluster using distrance matrix D = pairwise(Euclidean(), X, dims=2) R = @inferred(dbscan(D, 1.0, min_neighbors=10, metric=nothing)) k = nclusters(R) # println("k = $k") @test k == 3 @test k == length(R.seeds) @test all(<=(k), R.assignments) @test length(R.assignments) == size(X, 2) @test length(R.counts) == k @test [count(==(c), R.assignments) for c in 1:k] == R.counts @test all(>=(n*0.9), R.counts) # have cores @test all(clu -> length(clu.core_indices) > 0, R.clusters) # have boundaries @test all(clu -> length(clu.boundary_indices) > 0, R.clusters) @testset "NNTree-based implementation gives same clustering as distance matrix-based one" begin R2 = @inferred(dbscan(X, 1.0, metric=Euclidean(), min_neighbors=10)) @test R2 isa DbscanResult @test nclusters(R2) == nclusters(R) @test R2.assignments == R.assignments end @testset "Support for arrays other than Matrix{T}" begin @testset "$(typeof(M))" for M in equivalent_matrices(D) R2 = dbscan(M, 1.0, min_neighbors=10, metric=nothing) # run on complete subarray @test nclusters(R2) == nclusters(R) @test R2.assignments == R.assignments end end @testset "Deprecated distance matrix API" begin R2 = @test_deprecated(dbscan(D, 1.0, 10)) @test R2.assignments == R.assignments end end @testset "detecting outliers (#190)" begin v = vcat([.828 .134 .821 .630 .784 .674 .436 .089 .777 .526 .200 .908 .929 .835 .553 .647 .672 .234 .536 .617]) r = @inferred(dbscan(v, 0.075, min_cluster_size=3)) @test nclusters(r) == 3 @test findall(==(0), r.assignments) == [7] @test r.clusters[1].core_indices == [1, 3, 5, 9, 12, 13, 14] @test isempty(r.clusters[1].boundary_indices) @test r.clusters[2].core_indices == [2, 8, 11, 18] @test isempty(r.clusters[2].boundary_indices) @test r.clusters[3].core_indices == [4, 6, 10, 15, 16, 17, 19, 20] @test isempty(r.clusters[3].boundary_indices) # outlier pt #7 assigned to a 3rd cluster when bigger radius is used r2 = @inferred(dbscan(v, 0.1, min_cluster_size=3)) @test r2.assignments == setindex!(copy(r.assignments), 3, 7) end @testset "normal points" begin p0 = randn(StableRNG(0), 3, 1000) p1 = randn(StableRNG(1), 3, 1000) .+ [3.0, 3.0, 0.0] p2 = randn(StableRNG(2), 3, 1000) .+ [-3.0, -3.0, 0.0] points = [p0 p1 p2] # FIXME Current tests depend too much on a specific random sequence # We need better tests, that check point coordinates rather their indices inds_1 = [1, 3, 4, 5, 6, 9, 10, 12, 18, 22, 26, 29, 33, 35, 36, 39, 40, 42, 43, 46, 48, 50, 51, 52, 56, 57, 58, 60, 62, 63, 65, 70, 71, 72, 73, 74, 76, 80, 81, 84, 85, 86, 90, 91, 94, 95, 97, 100, 101, 102, 104, 107, 108, 112, 113, 114, 116, 117, 118, 119, 123, 124, 125, 126, 128, 129, 130, 131, 133, 134, 135, 136, 137, 138, 142, 145, 155, 157, 159, 160, 161, 162, 167, 168, 169, 170, 172, 174, 175, 177, 180, 181, 182, 184, 185, 187, 189, 190, 191, 197, 199, 204, 205, 208, 209, 212, 215, 217, 218, 219, 221, 223, 225, 227, 228, 229, 230, 231, 237, 239, 240, 241, 247, 248, 249, 251, 254, 256, 259, 261, 264, 265, 266, 268, 274, 277, 282, 283, 284, 285, 287, 288, 289, 290, 293, 294, 295, 298, 304, 305, 307, 308, 311, 312, 316, 317, 319, 320, 321, 323, 325, 330, 335, 339, 340, 343, 344, 345, 347, 363, 364, 365, 366, 367, 370, 371, 373, 378, 383, 385, 388, 391, 393, 396, 400, 402, 403, 404, 405, 406, 409, 411, 412, 415, 416, 417, 418, 421, 422, 423, 425, 426, 427, 430, 433, 435, 440, 441, 442, 444, 448, 450, 451, 453, 462, 464, 467, 472, 473, 474, 476, 482, 484, 485, 488, 489, 490, 492, 494, 496, 497, 498, 499, 500, 501, 503, 504, 505, 506, 508, 515, 519, 520, 526, 529, 530, 531, 532, 533, 536, 537, 542, 548, 556, 559, 562, 563, 565, 566, 567, 570, 574, 575, 576, 582, 584, 587, 588, 590, 591, 598, 600, 601, 602, 603, 604, 605, 608, 609, 612, 613, 614, 617, 621, 622, 623, 625, 627, 628, 629, 635, 636, 639, 641, 647, 650, 653, 655, 657, 659, 660, 661, 662, 665, 666, 667, 670, 671, 673, 674, 675, 676, 677, 679, 681, 683, 686, 688, 691, 694, 695, 696, 699, 701, 704, 706, 708, 711, 712, 713, 715, 717, 719, 720, 723, 724, 727, 729, 730, 731, 735, 739, 740, 741, 742, 743, 744, 746, 747, 750, 751, 755, 756, 761, 770, 772, 773, 774, 775, 780, 784, 787, 788, 790, 792, 794, 797, 800, 801, 802, 805, 806, 808, 809, 813, 814, 815, 816, 817, 821, 822, 824, 826, 827, 828, 830, 832, 833, 834, 835, 837, 843, 846, 847, 848, 850, 851, 854, 855, 857, 859, 862, 863, 864, 867, 869, 870, 872, 873, 875, 876, 878, 879, 880, 881, 884, 886, 887, 888, 889, 890, 892, 894, 901, 902, 908, 909, 913, 914, 917, 918, 919, 920, 922, 924, 928, 933, 934, 935, 936, 938, 940, 941, 943, 944, 948, 949, 950, 952, 953, 954, 960, 961, 965, 966, 970, 971, 979, 980, 983, 985, 986, 987, 990, 991, 993, 996, 1000, 1339, 2143] inds_2 = [132, 1001, 1003, 1006, 1008, 1011, 1014, 1015, 1017, 1018, 1019, 1020, 1023, 1024, 1027, 1028, 1034, 1036, 1039, 1042, 1044, 1045, 1047, 1049, 1051, 1052, 1056, 1057, 1058, 1059, 1064, 1065, 1068, 1070, 1071, 1076, 1081, 1084, 1087, 1089, 1090, 1093, 1094, 1095, 1096, 1097, 1099, 1100, 1102, 1103, 1108, 1110, 1111, 1112, 1113, 1119, 1120, 1123, 1124, 1125, 1130, 1131, 1136, 1140, 1142, 1143, 1146, 1147, 1156, 1158, 1161, 1162, 1167, 1168, 1172, 1174, 1176, 1177, 1178, 1179, 1183, 1186, 1187, 1190, 1191, 1192, 1193, 1200, 1201, 1202, 1203, 1206, 1209, 1210, 1212, 1213, 1215, 1217, 1219, 1222, 1226, 1229, 1230, 1231, 1232, 1233, 1239, 1241, 1242, 1244, 1246, 1247, 1249, 1250, 1251, 1256, 1257, 1258, 1260, 1261, 1263, 1264, 1265, 1266, 1268, 1275, 1276, 1282, 1285, 1286, 1287, 1291, 1293, 1294, 1295, 1300, 1303, 1307, 1308, 1313, 1315, 1318, 1320, 1325, 1331, 1333, 1336, 1337, 1341, 1345, 1346, 1347, 1348, 1350, 1351, 1355, 1358, 1360, 1361, 1362, 1364, 1365, 1368, 1370, 1372, 1373, 1374, 1378, 1379, 1381, 1382, 1383, 1386, 1392, 1393, 1394, 1396, 1397, 1398, 1400, 1401, 1405, 1406, 1408, 1410, 1413, 1415, 1416, 1418, 1419, 1420, 1421, 1426, 1431, 1433, 1434, 1437, 1441, 1445, 1446, 1447, 1448, 1452, 1453, 1454, 1455, 1459, 1462, 1463, 1464, 1466, 1467, 1468, 1473, 1474, 1476, 1477, 1478, 1480, 1484, 1485, 1487, 1489, 1490, 1492, 1493, 1499, 1501, 1502, 1503, 1504, 1505, 1507, 1514, 1515, 1517, 1519, 1521, 1522, 1524, 1526, 1528, 1529, 1534, 1541, 1542, 1544, 1545, 1546, 1551, 1552, 1553, 1555, 1556, 1561, 1564, 1566, 1567, 1568, 1569, 1571, 1574, 1575, 1576, 1583, 1586, 1588, 1589, 1590, 1592, 1594, 1596, 1597, 1598, 1599, 1601, 1602, 1603, 1604, 1606, 1607, 1608, 1609, 1610, 1612, 1615, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1627, 1629, 1633, 1635, 1641, 1643, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1654, 1656, 1658, 1659, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1673, 1678, 1683, 1684, 1685, 1690, 1692, 1696, 1700, 1701, 1703, 1705, 1706, 1708, 1709, 1710, 1712, 1713, 1716, 1718, 1719, 1720, 1722, 1723, 1725, 1726, 1727, 1729, 1730, 1736, 1737, 1738, 1739, 1740, 1742, 1743, 1744, 1747, 1748, 1749, 1752, 1755, 1758, 1761, 1769, 1771, 1775, 1776, 1777, 1785, 1787, 1791, 1793, 1794, 1795, 1796, 1797, 1798, 1799, 1803, 1805, 1806, 1808, 1811, 1816, 1818, 1821, 1822, 1827, 1828, 1829, 1830, 1831, 1834, 1838, 1839, 1845, 1849, 1850, 1851, 1852, 1853, 1857, 1859, 1864, 1867, 1869, 1870, 1871, 1872, 1878, 1886, 1888, 1889, 1898, 1900, 1901, 1904, 1908, 1912, 1913, 1914, 1915, 1916, 1917, 1919, 1921, 1924, 1929, 1932, 1933, 1935, 1936, 1938, 1940, 1941, 1942, 1948, 1949, 1951, 1952, 1954, 1955, 1957, 1962, 1964, 1965, 1966, 1973, 1976, 1977, 1978, 1979, 1984, 1985, 1988, 1993, 1994, 1996, 1998, 1999] inds_3 = [589, 703, 2002, 2004, 2005, 2006, 2008, 2010, 2014, 2015, 2016, 2017, 2019, 2022, 2023, 2024, 2025, 2031, 2032, 2035, 2036, 2038, 2041, 2042, 2044, 2046, 2048, 2052, 2053, 2056, 2057, 2058, 2059, 2060, 2063, 2066, 2070, 2071, 2072, 2073, 2075, 2076, 2078, 2080, 2081, 2083, 2085, 2088, 2089, 2093, 2096, 2097, 2098, 2099, 2101, 2103, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117, 2120, 2124, 2125, 2126, 2127, 2128, 2129, 2135, 2136, 2138, 2142, 2144, 2146, 2147, 2151, 2152, 2155, 2163, 2164, 2165, 2166, 2172, 2173, 2176, 2177, 2178, 2185, 2186, 2187, 2189, 2190, 2191, 2193, 2195, 2196, 2197, 2198, 2200, 2201, 2203, 2204, 2205, 2211, 2213, 2214, 2215, 2218, 2219, 2221, 2228, 2231, 2233, 2236, 2237, 2238, 2239, 2240, 2241, 2242, 2245, 2249, 2250, 2251, 2252, 2253, 2258, 2259, 2260, 2265, 2270, 2273, 2274, 2275, 2277, 2279, 2280, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2290, 2291, 2292, 2294, 2296, 2297, 2299, 2301, 2303, 2304, 2305, 2306, 2310, 2311, 2312, 2316, 2317, 2322, 2324, 2325, 2329, 2331, 2332, 2333, 2334, 2335, 2336, 2339, 2341, 2342, 2343, 2344, 2349, 2350, 2351, 2355, 2356, 2359, 2360, 2362, 2364, 2365, 2368, 2370, 2372, 2377, 2380, 2384, 2385, 2386, 2388, 2393, 2403, 2404, 2405, 2406, 2407, 2410, 2411, 2412, 2415, 2418, 2420, 2421, 2422, 2423, 2427, 2428, 2430, 2433, 2434, 2437, 2439, 2440, 2441, 2442, 2443, 2445, 2449, 2452, 2453, 2455, 2458, 2459, 2462, 2464, 2473, 2474, 2479, 2481, 2482, 2484, 2485, 2487, 2488, 2489, 2493, 2494, 2495, 2499, 2504, 2508, 2511, 2513, 2515, 2517, 2521, 2524, 2528, 2530, 2534, 2535, 2536, 2537, 2539, 2540, 2541, 2543, 2545, 2547, 2548, 2549, 2550, 2551, 2555, 2560, 2561, 2562, 2564, 2570, 2572, 2574, 2576, 2578, 2583, 2584, 2585, 2587, 2594, 2595, 2597, 2598, 2599, 2602, 2603, 2605, 2608, 2610, 2611, 2612, 2614, 2618, 2620, 2621, 2623, 2625, 2626, 2627, 2629, 2631, 2632, 2634, 2636, 2637, 2641, 2643, 2647, 2648, 2649, 2652, 2655, 2656, 2657, 2663, 2670, 2672, 2674, 2675, 2676, 2677, 2679, 2680, 2685, 2687, 2691, 2693, 2695, 2696, 2697, 2698, 2700, 2702, 2703, 2706, 2707, 2711, 2713, 2715, 2716, 2717, 2718, 2719, 2721, 2722, 2723, 2724, 2726, 2728, 2730, 2736, 2737, 2739, 2740, 2741, 2745, 2747, 2750, 2752, 2754, 2755, 2758, 2759, 2763, 2764, 2765, 2767, 2770, 2771, 2772, 2774, 2777, 2783, 2784, 2786, 2787, 2790, 2794, 2797, 2800, 2801, 2802, 2803, 2804, 2806, 2807, 2808, 2811, 2817, 2818, 2819, 2822, 2823, 2827, 2830, 2833, 2838, 2839, 2842, 2843, 2844, 2845, 2846, 2850, 2851, 2852, 2857, 2861, 2862, 2863, 2866, 2876, 2877, 2878, 2880, 2881, 2882, 2884, 2885, 2888, 2890, 2891, 2893, 2894, 2895, 2897, 2902, 2904, 2905, 2906, 2909, 2910, 2911, 2915, 2918, 2919, 2922, 2924, 2925, 2926, 2928, 2929, 2930, 2931, 2932, 2933, 2934, 2935, 2936, 2940, 2941, 2942, 2947, 2948, 2950, 2951, 2952, 2960, 2966, 2972, 2973, 2974, 2976, 2977, 2978, 2983, 2985, 2987, 2990, 2991, 2992, 2994, 2996] clustering = dbscan(points, 0.3, min_neighbors=3, min_cluster_size=100, leafsize=20) @test nclusters(clustering) == 3 clusters = clustering.clusters @test clusters[1].core_indices == inds_1 @test clusters[2].core_indices == inds_2 @test clusters[3].core_indices == inds_3 @testset "Issue #84" begin clu1 = dbscan(convert(Matrix{Float32}, points), 0.3f0, min_neighbors=3, min_cluster_size=100, leafsize=20) @test nclusters(clu1) == 3 clu2 = dbscan(convert(Matrix{Float32}, points), 0.3, min_neighbors=3, min_cluster_size=100, leafsize=20) @test nclusters(clu2) == nclusters(clu1) for i in 1:min(nclusters(clu1), nclusters(clu2)) c1 = clu1.clusters[i] c2 = clu2.clusters[i] @test c1.core_indices == c2.core_indices @test c1.boundary_indices == c2.boundary_indices end end end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
2519
using Test using Clustering using Random, StableRNGs @testset "fuzzy_cmeans()" begin rng = StableRNG(42) @testset "Argument checks" begin @test_throws ArgumentError fuzzy_cmeans(randn(2, 3), 1, 2.0) @test_throws ArgumentError fuzzy_cmeans(randn(2, 3), 4, 2.0) @test_throws ArgumentError fuzzy_cmeans(randn(2, 3), 2, 1.0) for disp in keys(Clustering.DisplayLevels) @test fuzzy_cmeans(randn(2, 3), 2, 2.0, tol=0.1, display=disp) isa FuzzyCMeansResult end end Random.seed!(rng, 34568) d = 3 n = 1000 k = 5 x = rand(rng, d, n) @testset "fuzziness = 2.0" begin fuzziness = 2.0 Random.seed!(rng, 34568) @test_logs (:warn, r"^Fuzzy C-means terminated without convergence") fuzzy_cmeans(x, k, fuzziness; maxiter=1, rng=rng) r = fuzzy_cmeans(x, k, fuzziness; rng=rng) @test isa(r, FuzzyCMeansResult{Float64}) @test nclusters(r) == k @test size(r.centers) == (d, k) @test size(r.weights) == (n, k) @test all(0 .<= r.weights .<= 1) @test sum(r.weights, dims=2) β‰ˆ fill(1.0, n) @test wcounts(r) isa Vector{Float64} @test length(wcounts(r)) == n @test all(0 .<= wcounts(r) .<= n) @test sum(wcounts(r)) β‰ˆ n end @testset "fuzziness = 3.0" begin fuzziness = 3.0 Random.seed!(rng, 34568) r = fuzzy_cmeans(x, k, fuzziness, rng=rng) @test isa(r, FuzzyCMeansResult{Float64}) @test nclusters(r) == k @test size(r.centers) == (d, k) @test size(r.weights) == (n, k) @test sum(r.weights, dims=2) β‰ˆ fill(1.0, n) @test all(0 .<= r.weights .<= 1) @test wcounts(r) isa Vector{Float64} @test length(wcounts(r)) == n @test all(0 .<= wcounts(r) .<= n) @test sum(wcounts(r)) β‰ˆ n end @testset "Abstract data matrix" begin fuzziness = 2.0 Random.seed!(rng, 34568) r = fuzzy_cmeans(view(x, :, :), k, fuzziness, rng=rng) @test isa(r, FuzzyCMeansResult{Float64}) @test nclusters(r) == k @test size(r.centers) == (d, k) @test size(r.weights) == (n, k) @test sum(r.weights, dims=2) β‰ˆ fill(1.0, n) @test all(0 .<= r.weights .<= 1) @test wcounts(r) isa Vector{Float64} @test length(wcounts(r)) == n @test all(0 .<= wcounts(r) .<= n) @test sum(wcounts(r)) β‰ˆ n end @testset "Float32" begin fuzziness = 2.0 xf32 = convert(Matrix{Float32},x) Random.seed!(rng, 34568) r = fuzzy_cmeans(xf32, k, fuzziness, rng=rng) @test isa(r, FuzzyCMeansResult{Float32}) @test eltype(r.centers) == Float32 @test wcounts(r) isa Vector{Float64} end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
9212
using Clustering using Test using CodecZlib using Distances using DelimitedFiles using Random, StableRNGs @testset "hclust() (hierarchical clustering)" begin rng = StableRNG(42) @testset "param checks" begin Random.seed!(rng, 42) D = rand(rng, 5, 5) Dsym = D + D' Dnan = copy(Dsym) Dnan[1, 3] = Dnan[3, 1] = NaN @testset "hclust()" begin @test_throws ArgumentError hclust(Dsym, linkage=:typo) @test_throws ArgumentError hclust(D, linkage=:single) @test_throws ArgumentError hclust(Dnan, linkage=:single) end hclu = @inferred(hclust(Dsym, linkage=:single)) @test hclu isa Clustering.Hclust @testset "cutree()" begin @test_throws ArgumentError cutree(hclu) @test_throws ArgumentError cutree(hclu, k=-1) @test_throws ArgumentError cutree(hclu, k=0) @test cutree(hclust(Dsym), k=10) isa Vector{Int} @test cutree(hclust(fill(0.0, 0, 0)), k=0) == Int[] @test cutree(hclust(fill(0.0, 0, 0)), k=1) == Int[] end end @testset "R hclust() generated examples" begin # load the examples hclu_examples_filename = joinpath(@__DIR__, "data", "hclust_generated_examples.jl.gz") Base.include_string(@__MODULE__, open(io -> read(io, String), GzipDecompressorStream, hclu_examples_filename), hclu_examples_filename) # test to make sure many random examples match R's implementation @testset "example #$i (linkage=:$(example["linkage"]), n=$(size(example["D"], 1)))" for (i, example) in enumerate(examples) linkage = example["linkage"] hclu = @inferred(hclust(example["D"], linkage=linkage)) @test hclu isa Clustering.Hclust @test Clustering.nnodes(hclu) == size(example["D"], 1) @test Clustering.nmerges(hclu) == Clustering.nnodes(hclu)-1 @test Clustering.height(hclu) β‰ˆ maximum(example["height"]) atol=1e-5 @test hclu.merges == example["merge"] @test hclu.heights β‰ˆ example["height"] atol=1e-5 @test hclu.order == example["order"] # compare hclust_nn_lw() (the default) and hclust_nn() (slower) methods if linkage ∈ [:complete, :average] @testset "hclust_nn()" begin linkage_fun = linkage == :complete ? Clustering.slicemaximum : Clustering.slicemean hclu2 = Hclust(Clustering.orderbranches_r!(Clustering.hclust_nn(example["D"], linkage_fun)), linkage) @test hclu2.merges == hclu.merges @test hclu2.heights β‰ˆ hclu.heights atol=1e-5 @test hclu2.order == hclu.order end end local cut_k = example["cut_k"] local cut_h = example["cut_h"] if cut_h !== nothing # due to small arithmetic differences between R and Julia heights might be slightly different # find the matching height cut_h_r = cut_h cut_h_ix = findmin(abs.(hclu.heights .- cut_h_r))[2] cut_h = hclu.heights[cut_h_ix] @assert isapprox(cut_h, cut_h_r, atol=1e-6) "h=$cut_h β‰ˆ h_R=$cut_h_r" end @testset "cutree(hclu, k=$(repr(cut_k)), h=$(repr(cut_h)))" begin cutt = @inferred(cutree(hclu, k=cut_k, h=cut_h)) @test cutt isa Vector{Int} @test length(cutt) == Clustering.nnodes(hclu) @test cutt == example["cutree"] end end end @testset "hclust_n3()" begin # no thorough testing (it's O(NΒ³)), just test one example example_n3 = examples[10] hclu_n3 = @inferred(Clustering.orderbranches_r!(Clustering.hclust_n3(example_n3["D"], Clustering.slicemaximum))) @test hclu_n3.mleft == example_n3["merge"][:, 1] @test hclu_n3.mright == example_n3["merge"][:, 2] @test hclu_n3.heights β‰ˆ example_n3["height"] atol=1e-5 end local hclust_linkages = [:single, :average, :complete] @testset "hclust(0Γ—0 matrix, linkage=$linkage)" for linkage in hclust_linkages hclu = hclust(fill(0.0, 0, 0), linkage=linkage) @test Clustering.nnodes(hclu) == 0 @test Clustering.nmerges(hclu) == 0 @test Clustering.height(hclu) == -Inf cut1 = @inferred(cutree(hclu, h=1)) @test cut1 == Int[] end @testset "hclust([$dist] 1Γ—1 matrix, linkage=$linkage)" for linkage in hclust_linkages, dist in [-Inf, 0, 1, 2, Inf] hclu = hclust(fill(dist, 1, 1), linkage=linkage) @test Clustering.nnodes(hclu) == 1 @test Clustering.nmerges(hclu) == 0 @test Clustering.height(hclu) == -Inf cut1 = @inferred(cutree(hclu, h=1)) @test cut1 == [1] end @testset "hclust(linkage=$linkage) when data contains an isolated point (#109)" for linkage in hclust_linkages # point #2 is isolated: distances to all the other points are Inf mdist = [ 0.0 Inf 0.1 Inf Inf Inf Inf Inf Inf Inf; Inf 0.0 Inf Inf Inf Inf Inf Inf Inf Inf; 0.1 Inf 0.0 0.11 Inf Inf Inf Inf Inf Inf; Inf Inf 0.11 0.0 0.86 0.86 Inf Inf Inf Inf; Inf Inf Inf 0.86 0.0 0.67 0.72 1.93 Inf Inf; Inf Inf Inf 0.86 0.67 0.0 0.72 Inf Inf Inf; Inf Inf Inf Inf 0.72 0.72 0.0 Inf 0.72 Inf; Inf Inf Inf Inf 1.93 Inf Inf 0.0 3.91 Inf; Inf Inf Inf Inf Inf Inf 0.72 3.91 0.0 0.52; Inf Inf Inf Inf Inf Inf Inf Inf 0.52 0.0] hclu = hclust(mdist, linkage=linkage) @test Clustering.nnodes(hclu) == 10 @test Clustering.nmerges(hclu) == 9 @test Clustering.height(hclu) == Inf cut1 = @inferred(cutree(hclu, h=1)) @test cut1 isa Vector{Int} @test length(cut1) == Clustering.nnodes(hclu) if linkage == :single @test cut1 == [1, 2, 1, 1, 1, 1, 1, 3, 1, 1] end end @testset "cutree(hclust, h=h) when the height of all subtrees greater than h (#141)" begin A = [0.0 0.7; 0.7 0.0] hA = hclust(A, linkage=:average) @test cutree(hA, h=0.5) == [1, 2] @test cutree(hA, h=0.7) == [1, 1] @test cutree(hA, h=0.9) == [1, 1] end @testset "Leaf ordering methods" begin n = 10 mat = zeros(Int, n, n) for i in 1:n last = minimum([i+Int(floor(n/5)), n]) for j in i:last mat[i,j] = 1 end end dm = pairwise(Euclidean(), mat, dims=2) hcl_r = hclust(dm, linkage=:average) hcl_barjoseph = hclust(dm, linkage=:average, branchorder=:barjoseph) hcl_optimal = hclust(dm, linkage=:average, branchorder=:optimal) @test hcl_r.order == [3, 1, 2, 4, 5, 9, 10, 6, 7, 8] @test hcl_r.merges == [-1 -2; -3 1; -4 -5; -9 -10; -7 -8; -6 5; 2 3; 4 6; 7 8] @test hcl_barjoseph.order == collect(1:10) @test hcl_barjoseph.merges == [-1 -2; 1 -3; -4 -5; -9 -10; -7 -8; -6 5; 2 3; 6 4; 7 8] @test hcl_barjoseph.merges == hcl_optimal.merges @test hcl_barjoseph.order == hcl_optimal.order @test_throws ArgumentError hclust(dm, linkage=:average, branchorder=:wrong) hcl_zero = hclust(fill(0.0, 0, 0), linkage=:average, branchorder=:barjoseph) @test Clustering.nnodes(hcl_zero) == 0 hcl_one = hclust(fill(0.0, 1, 1), linkage=:average, branchorder=:barjoseph) @test Clustering.nnodes(hcl_one) == 1 # Larger matrix to make sure all swaps are tested Random.seed!(rng, 42) D = rand(rng, 50,50) Dm = D + D' hcl_rand = hclust(Dm, linkage=:average, branchorder=:optimal) merges = [-1 -36; -35 -34; 1 -16; -12 -41; -13 -30; -43 -24; -21 -32; -5 -18; -47 -22; -33 -38; -11 -2; -44 -49; -42 -10; 2 -37; -46 -25; -9 -15; -27 -8; -40 -39; -28 -19; -31 -6; 4 -14; 11 -50; -48 -4; 15 3; -23 -7; -20 -3; 18 -45; 5 -29; 9 6; 25 22; 21 7; -26 27; 8 10; 31 19; 13 14; 34 24; 12 20; 30 33; 17 -17; 23 26; 16 37; 32 39; 29 35; 38 36; 41 28; 45 43; 46 44; 42 47; 48 40] @test hcl_rand.merges == merges order = [26, 40, 39, 45, 27, 8, 17, 9, 15, 44, 49, 31, 6, 13, 30, 29, 47, 22, 43, 24, 42, 10, 35, 34, 37, 23, 7, 11, 2, 50, 5, 18, 33, 38, 12, 41, 14, 21, 32, 28, 19, 46, 25, 1, 36, 16, 48, 4, 20, 3] @test hcl_rand.order == order @test Clustering.nnodes(hcl_rand) == 50 end @testset "Tree construction with duplicate distances (#176)" begin hclupi = hclust(fill(3.141592653589, 4, 4), linkage=:average) @test hclupi.heights == fill(3.141592653589, 3) @test hclupi.merges == [-1 -2; -4 1; -3 2] # check that the tree construction with the given matrix does not fail dist1_mtx = readdlm(joinpath(@__DIR__, "data", "hclust_dist_issue176_1.txt"), ',', Float64) hclu1_avg = hclust(dist1_mtx, linkage=:average) hclu1_min = hclust(dist1_mtx, linkage=:single) hclu1_ward = hclust(dist1_mtx, linkage=:ward) dist2_mtx = readdlm(joinpath(@__DIR__, "data", "hclust_dist_issue176_2.txt"), ',', Float64) hclu2_avg = hclust(dist2_mtx, linkage=:average) hclu2_min = hclust(dist2_mtx, linkage=:single) hclu2_ward = hclust(dist2_mtx, linkage=:ward) end @testset "cuttree() with merges not sorted by height (#252)" begin dist_mtx = readdlm(joinpath(@__DIR__, "data", "hclust_dist_issue252.txt"), ',', Float64) hclu_opt = hclust(dist_mtx, linkage=:complete, branchorder=:optimal) clu_opt = cutree(hclu_opt, h=20) hclu_r = hclust(dist_mtx, linkage=:complete) clu_r = cutree(hclu_r, h=20) @test clu_opt == clu_r end end # testset "hclust()"
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
6295
using Test using Clustering using Distances using Random using LinearAlgebra using StableRNGs rng = StableRNG(42) # custom distance metric struct MySqEuclidean <: SemiMetric end # redefinition of Distances.pairwise! for MySqEuclidean type function Distances.pairwise!(dist::MySqEuclidean, r::AbstractMatrix, a::AbstractMatrix, b::AbstractMatrix; dims::Integer=2) dims == 2 || throw(ArgumentError("only dims=2 supported for MySqEuclidean distance")) mul!(r, transpose(a), b) sa2 = sum(abs2, a, dims=1) sb2 = sum(abs2, b, dims=1) @inbounds r .= sa2' .+ sb2 .- 2r end Distances.result_type(::MySqEuclidean, ::Type{T}, ::Type{T}) where T <: Number = T (dist::MySqEuclidean)(a::AbstractMatrix, b::AbstractMatrix) = pairwise!(similar(a, size(a, 2), size(b, 2))) @testset "kmeans() (k-means)" begin @testset "Argument checks" begin @test_throws ArgumentError kmeans(randn(2, 3), 0) @test_throws ArgumentError kmeans(randn(2, 3), 4) @test kmeans(randn(2, 3), 2) isa KmeansResult @test_throws ArgumentError kmeans(randn(2, 3), 2, display=:mylog) for disp in keys(Clustering.DisplayLevels) @test kmeans(randn(2, 3), 2, display=disp) isa KmeansResult end end @testset "k=1 and k=n corner cases" begin x = [0.5 1 2; 1 0.5 0; 3 2 1] km1 = kmeans(x,1) @test km1.centers == reshape([7/6, 0.5, 2.0], (3, 1)) @test km1.counts == [3] @test km1.assignments == [1, 1, 1] km3 = kmeans(x, 3) @test km3.centers == x @test km3.counts == fill(1, (3)) @test km3.assignments == 1:3 w = [0.5, 2.0, 1.0] @test kmeans(x,1,weights=w).wcounts == [3.5] @test kmeans(x,3,weights=w).wcounts == w end Random.seed!(rng, 34568) m = 3 n = 1000 k = 10 x = rand(rng, m, n) xt = copy(transpose(x)) function equal_kmresults(km1::KmeansResult, km2::KmeansResult) @test km1.centers β‰ˆ km2.centers @test km1.assignments == km2.assignments @test km1.costs β‰ˆ km2.costs @test km1.counts == km2.counts @test km1.wcounts == km2.wcounts @test km1.totalcost β‰ˆ km2.totalcost @test km1.iterations == km2.iterations @test km1.converged == km2.converged end @testset "non-weighted" begin Random.seed!(rng, 34568) r = kmeans(x, k; maxiter=50, rng=rng) @test isa(r, KmeansResult{Matrix{Float64}, Float64, Int}) @test nclusters(r) == k @test size(r.centers) == (m, k) @test length(r.assignments) == n @test all(a -> 1 <= a <= k, r.assignments) @test length(r.costs) == n @test length(counts(r)) == k @test sum(counts(r)) == n @test wcounts(r) == counts(r) @test sum(r.costs) β‰ˆ r.totalcost Random.seed!(rng, 34568) r_t = kmeans(xt', k; maxiter=50, rng=rng) equal_kmresults(r, r_t) end @testset "non-weighted (float32)" begin Random.seed!(rng, 34568) x32 = map(Float32, x) x32t = copy(x32') r = kmeans(x32, k; maxiter=50, rng=rng) @test isa(r, KmeansResult{Matrix{Float32}, Float32, Int}) @test nclusters(r) == k @test size(r.centers) == (m, k) @test length(r.assignments) == n @test all(a -> 1 <= a <= k, r.assignments) @test length(r.costs) == n @test length(counts(r)) == k @test sum(counts(r)) == n @test wcounts(r) == counts(r) @test sum(r.costs) β‰ˆ r.totalcost Random.seed!(rng, 34568) r_t = kmeans(x32t', k; maxiter=50, rng=rng) equal_kmresults(r, r_t) end @testset "weighted" begin w = rand(rng, n) Random.seed!(rng, 34568) r = kmeans(x, k; maxiter=50, weights=w, rng=rng) @test isa(r, KmeansResult{Matrix{Float64}, Float64, Float64}) @test nclusters(r) == k @test size(r.centers) == (m, k) @test length(r.assignments) == n @test all(a -> 1 <= a <= k, r.assignments) @test length(r.costs) == n @test length(counts(r)) == k @test sum(counts(r)) == n cw = zeros(k) for i = 1:n cw[r.assignments[i]] += w[i] end @test wcounts(r) β‰ˆ cw @test dot(r.costs, w) β‰ˆ r.totalcost Random.seed!(rng, 34568) r_t = kmeans(xt', k; maxiter=50, weights=w, rng=rng) equal_kmresults(r, r_t) end @testset "custom distance" begin r = kmeans(x, k; maxiter=50, init=:kmcen, distance=MySqEuclidean()) r2 = kmeans(x, k; maxiter=50, init=:kmcen) @test isa(r, KmeansResult{Matrix{Float64}, Float64, Int}) @test nclusters(r) == k @test size(r.centers) == (m, k) @test length(r.assignments) == n @test all(a -> 1 <= a <= k, r.assignments) @test length(r.costs) == n @test length(counts(r)) == k @test sum(counts(r)) == n @test wcounts(r) == r.counts @test sum(r.costs) β‰ˆ r.totalcost equal_kmresults(r, r2) r_t = kmeans(xt', k; maxiter=50, init=:kmcen, distance=MySqEuclidean()) equal_kmresults(r, r_t) end @testset "Argument checks" begin Random.seed!(rng, 34568) n = 50 k = 10 x = randn(rng, m, n) @testset "init=" begin @test_throws ArgumentError kmeans(x, k, init=1:(k-2)) @test_throws ArgumentError kmeans(x, k, init=1:(k+2)) @test kmeans(x, k, init=1:k, maxiter=5) isa KmeansResult @test_throws ArgumentError kmeans(x, k, init=:myseeding) for algname in (:kmpp, :kmcen, :rand) alg = Clustering.seeding_algorithm(algname) @test kmeans(x, k, init=algname) isa KmeansResult @test kmeans(x, k, init=alg) isa KmeansResult end end end @testset "Integer data" begin x = rand(rng, Int16, m, n) Random.seed!(rng, 654) r = kmeans(x, k; maxiter=50, rng=rng) @test isa(r, KmeansResult{Matrix{Float64}, Float64, Int}) end @testset "kmeans! data types" begin Random.seed!(rng, 1101) for TX in (Int, Float32, Float64) for TC in (Float32, Float64) for TW in (Nothing, Int, Float32, Float64) x = rand(rng, TX, m, n) c = rand(rng, TC, m, k) if TW == Nothing r = kmeans!(x, c; maxiter=1) @test isa(r, KmeansResult{Matrix{TC},<:Real,Int}) else w = rand(rng, TW, n) r = kmeans!(x, c; weights=w, maxiter=1) @test isa(r, KmeansResult{Matrix{TC},<:Real,TW}) end end end end end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
3243
using Test using Distances using Clustering include("test_helpers.jl") @testset "kmedoids() (k-medoids)" begin @testset "Argument checks" begin Random.seed!(34568) @test_throws ArgumentError kmedoids(randn(2, 3), 1) @test_throws ArgumentError kmedoids(randn(2, 3), 4) dist = max.(pairwise(Euclidean(), randn(2, 3), dims=2), 0.1) @test @inferred(kmedoids(dist, 2)) isa KmedoidsResult # incorrect distance matrix invdist = inv.(max.(pairwise(Euclidean(), randn(2, 3), dims=2), 0.1)) @test_throws ArgumentError kmedoids(invdist, 2) @test_throws ArgumentError kmedoids(dist, 2, display=:mylog) for disp in keys(Clustering.DisplayLevels) @test @inferred(kmedoids(dist, 2, display=disp)) isa KmedoidsResult end end Random.seed!(34568) d = 3 n = 200 k = 10 X = rand(d, n) dist = pairwise(SqEuclidean(), X, dims=2) @assert size(dist) == (n, n) Random.seed!(34568) # reset seed again to known state R = @inferred(kmedoids(dist, k)) @test isa(R, KmedoidsResult) @test nclusters(R) == k @test length(R.medoids) == length(unique(R.medoids)) @test all(a -> 1 <= a <= k, R.assignments) @test R.assignments[R.medoids] == 1:k # Every medoid should belong to its own cluster @test sum(counts(R)) == n @test wcounts(R) == counts(R) @test R.costs == dist[LinearIndices((n, n))[CartesianIndex.(R.medoids[R.assignments], 1:n)]] @test isapprox(sum(R.costs), R.totalcost) @test R.converged @testset "Support for arrays other than Matrix{T}" begin @testset "$(typeof(M))" for M in equivalent_matrices(dist) Random.seed!(34568) # restore seed as kmedoids is not determantistic R2 = kmedoids(M, k) @test R2.assignments == R.assignments end end @testset "Duplicated points (#231)" begin pts = [0.0 0.0] dists = pairwise(SqEuclidean(), pts, dims=2) dupmed = kmedoids(dists, 2) @test nclusters(dupmed) == 2 @test sort(dupmed.medoids) == [1, 2] @test sort(dupmed.assignments) == [1, 2] end @testset "Toy example #1" begin pts = [1 2 3; .1 .2 .3; 4 5.6 7] # k=1 and k=n cases dists = pairwise(SqEuclidean(), pts, dims=2) @testset "k=1" begin kmed1 = @inferred(kmedoids(dists, 1)) @test nclusters(kmed1) == 1 @test assignments(kmed1) == [1, 1, 1] @test kmed1.medoids == [2] end @testset "k=3" begin kmed3 = @inferred(kmedoids(dists, 3)) @test nclusters(kmed3) == 3 @test sort(assignments(kmed3)) == [1, 2, 3] @test sort(kmed3.medoids) == [1, 2, 3] end end @testset "Toy example #2" begin pts = reshape(map(Float64, [1, 6, 2, 3, 7, 21, 8, 20, 22]), 1, 9) # this data set has three obvious groups: # group 1: [1, 3, 4], values: [1, 2, 3] # group 2: [2, 5, 7], values: [6, 7, 8] # group 3: [6, 8, 9], values: [21, 20, 22] dists = pairwise(SqEuclidean(), pts, dims=2) R = @inferred(kmedoids!(dists, [1, 2, 6])) @test isa(R, KmedoidsResult) @test nclusters(R) == 3 @test R.medoids == [3, 5, 6] @test R.assignments == [1, 2, 1, 1, 2, 3, 2, 3, 3] @test counts(R) == [3, 3, 3] @test wcounts(R) == counts(R) @test R.costs β‰ˆ [1, 1, 0, 1, 0, 0, 1, 1, 1] @test R.totalcost β‰ˆ 6.0 @test R.converged end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
3239
# simple program to test MCL clustering using Test using Clustering @testset "MCL" begin @testset "Argument Checks" begin Random.seed!(34568) @test_throws DimensionMismatch mcl(zeros(Float64, 4, 3)) # nonsquare adj = inv.(max.(pairwise(Euclidean(), randn(2, 3), dims=2), 0.1)) @test_throws ArgumentError mcl(zeros(Float64, 3, 3), display=:mylog) for disp in keys(Clustering.DisplayLevels) @test mcl(zeros(Float64, 3, 3), display=disp) isa MCLResult end end Random.seed!(34568) # initialize adjacency matrix of a weighted graph nodes = [:bat, :bit, :cat, :fit, :hat, :hit] edges = Tuple{Symbol, Symbol, Float64}[(:cat, :hat, 0.2), (:hat, :bat, 0.16), (:bat, :cat, 1.0), (:bat, :bit, 0.125), (:bit, :fit, 0.25), (:fit, :hit, 0.5), (:hit, :bit, 0.16)] adj_matrix = zeros(Float64, length(nodes), length(nodes)) for edge in edges n1 = findfirst(isequal(edge[1]), nodes) n2 = findfirst(isequal(edge[2]), nodes) adj_matrix[n1, n2] = adj_matrix[n2, n1] = edge[3] end @assert issymmetric(adj_matrix) @testset "fractional inflation param (1.8)" begin res = mcl(adj_matrix, display=:none, inflation=1.8) @test isa(res, MCLResult) local k = length(res.counts) # @show k @test k == 2 @test all(a -> 1 <= a <= k, res.assignments) @test length(res.assignments) == length(nodes) @test length(res.counts) == k local c for c in 1:k @test count(==(c), res.assignments) == res.counts[c] end @test res.nunassigned == 0 @test res.assignments == [1, 2, 1, 2, 1, 2] end @testset "integer inflation param (2)" begin res = mcl(adj_matrix, display=:none, inflation=2) @test isa(res, MCLResult) @test length(res.assignments) == length(nodes) @test res.nunassigned == 0 end @testset "test non-integral expansion" begin # should not raise an exception res = mcl(adj_matrix, display=:none, inflation=1.5, expansion=1.5, save_final_matrix=true) @test isa(res, MCLResult) @test length(res.assignments) == length(nodes) @test size(res.mcl_adj) == size(adj_matrix) # test that the matrix is returned end @testset "allow_singles option" begin res = mcl(diagm(0 => [1.0, 1.0]), display=:none, allow_singles=true) @test length(res.counts) == 2 @test res.assignments == [1, 2] @test res.counts == [1, 1] @test res.nunassigned == 0 res = mcl(diagm(0 => [1.0, 1.0]), display=:none, allow_singles=false) @test length(res.counts) == 0 @test res.assignments == [0, 0] @test res.nunassigned == 2 end @testset "sparse input matrix" begin res = mcl(sparse(adj_matrix), display=:none, expansion=2) @test isa(res, MCLResult) @test length(res.assignments) == length(nodes) @test res.nunassigned == 0 @test eltype(res.mcl_adj) === Float64 # fractional powers not supported for sparse matrices @test_broken mcl(sparse(adj_matrix), display=:none, expansion=2.1) end @testset "use Float32 input" begin res = mcl(convert(Matrix{Float32},adj_matrix), display=:none, expansion=2) @test isa(res, MCLResult) @test eltype(res.mcl_adj) === Float32 end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
695
using Test using Clustering @testset "mutualinfo() (mutual information)" begin # https://nlp.stanford.edu/IR-book/html/htmledition/evaluation-of-clustering-1.html a1 = [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3] a2 = [1, 1, 1, 1, 1, 2, 3, 3, 1, 2, 2, 2, 2, 2, 3, 3, 3] @test mutualinfo(a1, a2, normed=false) β‰ˆ 0.39 atol=1.0e-2 @test mutualinfo(a1, a2) β‰ˆ 0.36 atol=1.0e-2 # https://doi.org/10.1186/1471-2105-7-380 a1 = [1, 1, 1, 1, 1, 3, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 1, 2] a2 = [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4] @test mutualinfo(a1, a2, normed=false) β‰ˆ 0.6 atol=0.1 @test mutualinfo(a1, a2) β‰ˆ 0.5 atol=0.1 end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
1089
# Test Rand index using Test using Clustering @testset "randindex() (Rand index)" begin a1 = [1, 1, 1, 2, 2, 2, 3, 3, 3, 3] a2 = [1, 1, 1, 1, 2, 2, 2, 2, 2, 2] a3 = [3, 3, 3, 2, 2, 2, 1, 1, 1, 1] (ARI, RI, MI, HI) = randindex(a1, a1) @test ARI β‰ˆ 1.0 atol=1.0e-12 @test RI β‰ˆ 1.0 atol=1.0e-12 @test MI β‰ˆ 0.0 atol=1.0e-12 @test HI β‰ˆ 1.0 atol=1.0e-12 (ARI, RI, MI, HI) = randindex(a1, a3) @test ARI β‰ˆ 1.0 atol=1.0e-12 @test RI β‰ˆ 1.0 atol=1.0e-12 @test MI β‰ˆ 0.0 atol=1.0e-12 @test HI β‰ˆ 1.0 atol=1.0e-12 (ARI, RI, MI, HI) = randindex(a1, a2) @test ARI β‰ˆ 0.403669 atol=1.0e-5 @test RI β‰ˆ 0.711111 atol=1.0e-5 @test MI β‰ˆ 0.288888 atol=1.0e-5 @test HI β‰ˆ 0.422222 atol=1.0e-5 @test randindex(a1, a2) == randindex(a2, a1) @test randindex(ones(Int, 3), ones(Int, 3)) == (1, 1, 0, 1) @testset "large independent clusterings (#225)" begin rng = MersenneTwister(123) n = 10_000_000 k = 5 # number of clusters a = rand(rng, 1:k, n) b = rand(rng, 1:k, n) @test collect(randindex(a, b)) β‰ˆ [0.0, ((k-1)^2 + 1)/k^2, 2*(k-1)/k^2, ((k-2)/k)^2] atol=1e-5 end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
559
using Clustering using Test using Random using LinearAlgebra using SparseArrays using StableRNGs using Statistics tests = ["seeding", "kmeans", "kmedoids", "affprop", "dbscan", "fuzzycmeans", "counts", "silhouette", "clustering_quality", "varinfo", "randindex", "hclust", "mcl", "vmeasure", "mutualinfo", "confusion"] println("Runing tests:") for t in tests fp = "$(t).jl" println("* $fp ...") include(fp) end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
4540
using Clustering using Distances: SqEuclidean, pairwise using Test @testset "seeding" begin Random.seed!(34568) @test RandSeedAlg <: SeedingAlgorithm @test KmppAlg <: SeedingAlgorithm @test KmCentralityAlg <: SeedingAlgorithm alldistinct(x::Vector{Int}) = (length(Set(x)) == length(x)) function min_interdist(X::AbstractMatrix) dists = pairwise(SqEuclidean(), X, dims=2) n = size(X, 2) r = Inf for i = 1:n, j = 1:n if i != j && dists[i,j] < r r = dists[i,j] end end return r end d = 3 n = 100 k = 5 X = rand(d, n) C = pairwise(SqEuclidean(), X, dims=2) Xt = copy(transpose(X)) Ct = copy(transpose(C)) md0 = min_interdist(X) @testset "Argument checks" begin @test_throws ArgumentError initseeds([1, 2], X, 3) @test initseeds([1, 2], X, 2) == [1, 2] @test_throws ArgumentError initseeds([-1, 2, 3], X, 3) @test_throws ArgumentError initseeds([1, n+2, 3], X, 3) @test_throws ArgumentError initseeds_by_costs([1, 2], C, 3) @test initseeds_by_costs([1, 2], C, 2) == [1, 2] @test_throws ArgumentError initseeds(:myseeding, X, 2) iseeds = initseeds(:kmpp, X, k) @test_throws DimensionMismatch copyseeds!(Matrix{Float64}(undef, 3, 6), X, iseeds) @test_throws DimensionMismatch copyseeds!(Matrix{Float64}(undef, 4, 5), X, iseeds) @test copyseeds!(Matrix{Float64}(undef, 3, 5), X, iseeds) isa Matrix{Float64} @testset "Seeds number check for $(typeof(alg))" for alg in (RandSeedAlg(), KmppAlg(), KmCentralityAlg()) @test_throws ArgumentError initseeds(alg, X, 0) @test_throws ArgumentError initseeds(alg, X, n + 1) @test_throws ArgumentError initseeds_by_costs(alg, C, 0) @test_throws ArgumentError initseeds_by_costs(alg, C, n + 1) @test initseeds(alg, X, 4) isa Vector{Int} @test initseeds_by_costs(alg, C, 4) isa Vector{Int} end end @testset "RandSeed" begin Random.seed!(34568) iseeds = initseeds(RandSeedAlg(), X, k) @test length(iseeds) == k @test alldistinct(iseeds) Random.seed!(34568) iseeds_t = initseeds(RandSeedAlg(), Xt', k) @test iseeds == iseeds_t Random.seed!(34568) iseeds2 = initseeds(:rand, X, k) @test iseeds2 == iseeds Random.seed!(34568) iseeds = initseeds_by_costs(RandSeedAlg(), C, k) @test length(iseeds) == k @test alldistinct(iseeds) Random.seed!(34568) iseeds_t = initseeds_by_costs(RandSeedAlg(), Ct', k) @test iseeds == iseeds_t R = copyseeds!(Matrix{Float64}(undef, d, k), X, iseeds) @test isa(R, Matrix{Float64}) @test R == X[:, iseeds] R_t = copyseeds!(Matrix{Float64}(undef, d, k), Xt', iseeds) @test R == R_t end @testset "Kmpp" begin Random.seed!(34568) iseeds = initseeds(KmppAlg(), X, k) @test length(iseeds) == k @test alldistinct(iseeds) Random.seed!(34568) iseeds_t = initseeds(KmppAlg(), Xt', k) @test iseeds == iseeds_t Random.seed!(34568) iseeds2 = initseeds(:kmpp, X, k) @test iseeds2 == iseeds Random.seed!(34568) iseeds_t2 = initseeds(:kmpp, Xt', k) @test iseeds_t2 == iseeds_t Random.seed!(34568) iseeds = initseeds_by_costs(KmppAlg(), C, k) @test length(iseeds) == k @test alldistinct(iseeds) Random.seed!(34568) iseeds_t = initseeds_by_costs(KmppAlg(), Ct', k) @test iseeds == iseeds_t @test min_interdist(X[:, iseeds]) > 20 * md0 @test min_interdist((Xt')[:, iseeds]) > 20 * md0 Random.seed!(34568) iseeds = initseeds_by_costs(:kmpp, C, k) @test length(iseeds) == k @test alldistinct(iseeds) Random.seed!(34568) iseeds_t = initseeds_by_costs(:kmpp, Ct', k) @test iseeds_t == iseeds end @testset "Kmcentrality" begin Random.seed!(34568) iseeds = initseeds(KmCentralityAlg(), X, k) @test length(iseeds) == k @test alldistinct(iseeds) Random.seed!(34568) iseeds_t = initseeds(KmCentralityAlg(), Xt', k) @test iseeds == iseeds_t Random.seed!(34568) iseeds2 = initseeds(:kmcen, X, k) @test iseeds2 == iseeds Random.seed!(34568) iseeds_t2 = initseeds(:kmcen, Xt', k) @test iseeds_t2 == iseeds_t Random.seed!(34568) iseeds = initseeds_by_costs(KmCentralityAlg(), C, k) @test length(iseeds) == k @test alldistinct(iseeds) Random.seed!(34568) iseeds_t = initseeds_by_costs(KmCentralityAlg(), Ct', k) @test iseeds == iseeds_t @test min_interdist(X[:, iseeds]) > 2 * md0 @test min_interdist((Xt')[:, iseeds]) > 2 * md0 end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
2626
using Test using Clustering using Distances @testset "silhouettes()" begin local D = [0 1 2 3 1 0 1 2 2 1 0 1 3 2 1 0] @assert size(D) == (4, 4) @testset "Input checks" begin @test_throws DimensionMismatch silhouettes([1, 1, 3, 2], D[1:4, 1:3]) @test_throws DimensionMismatch silhouettes([1, 1, 2, 2, 2], D) @test_throws Exception silhouettes([1, 1, 2, 2, 2], D, batch_size=3) D2 = copy(D) D2[2, 3] = 4 @test_throws ArgumentError silhouettes([1, 1, 2, 2], D2) end @test @inferred(silhouettes([1, 1, 2, 2], D)) β‰ˆ [1.5/2.5, 0.5/1.5, 0.5/1.5, 1.5/2.5] @test @inferred(silhouettes([1, 1, 2, 2], convert(Matrix{Float32}, D))) isa AbstractVector{Float32} @test silhouettes([1, 2, 1, 2], D) β‰ˆ [0.0, -0.5, -0.5, 0.0] @test silhouettes([1, 1, 1, 2], D) β‰ˆ [0.5, 0.5, -1/3, 0.0] @testset "zero cluster distances correctly" begin a = [fill(1, 5); fill(2, 5)] d = fill(0, (10, 10)) @test silhouettes(a, d) == fill(0.0, 10) d = fill(1, (10, 10)) for i in 1:10; d[i, i] = 0; end d[1, 2] = d[2, 1] = 5 @test silhouettes(a, d) == [[-0.5, -0.5]; fill(0.0, 8)] end @testset "throws an error when degenerated clustering is given" begin a = fill(1, 10) d = fill(1, (10, 10)) for i in 1:10; d[i, i] = 0; end @test_throws ArgumentError silhouettes(a, d) end @testset "empty clusters handled correctly (#241)" begin X = rand(MersenneTwister(123), 3, 10) pd = pairwise(Euclidean(), X, dims=2) asgns = [5, 2, 2, 3, 2, 2, 3, 2, 3, 5] @test all(>=(-0.5), silhouettes(asgns, pd)) @test all(>=(-0.5), silhouettes(asgns, X, metric=Euclidean())) end @testset "silhouettes(metric=$metric, batch_size=$(batch_size !== nothing ? batch_size : "nothing"))" for (metric, batch_size, supported) in [ (Euclidean(), nothing, true), (Euclidean(), 1000, true), (Euclidean(), 10, false), (SqEuclidean(), nothing, true), (SqEuclidean(), 1000, true), (SqEuclidean(), 10, true), ] Random.seed!(123) X = rand(3, 100) pd = pairwise(metric, X, dims=2) a = rand(1:10, size(X, 2)) kmeans_clu = kmeans(X, 5) if supported @test silhouettes(a, X; metric=metric, batch_size=batch_size) β‰ˆ silhouettes(a, pd) @test silhouettes(kmeans_clu, X; metric=metric, batch_size=batch_size) β‰ˆ silhouettes(kmeans_clu, pd) else @test_throws Exception silhouettes(a, X; metric=metric, batch_size=batch_size) @test_throws Exception silhouettes(kmeans_clu, X; metric=metric, batch_size=batch_size) end end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
624
using LinearAlgebra using SparseArrays """ equivalent_matrices(x::AbstractMatrix) Returns a collection of matrixes that are equal to the input `x`, but of a different type. Useful for testing if things still work on different types of matrix. """ function equivalent_matrices(x::AbstractMatrix) mats = [ Base.PermutedDimsArray(x, (1,2)), # identity permutation view(x, :, :), view(x, collect.(axes(x))...), # breaks `strides` sparse(x), ] if issymmetric(x) append!(mats, [ Symmetric(x), Transpose(x), ]) end return mats end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
549
# Test variational information using Test using Clustering @testset "varinfo() (variational information)" begin a1 = [1, 1, 1, 2, 2, 2, 3, 3, 3, 3] a2 = [1, 1, 1, 1, 2, 2, 2, 2, 2, 2] @test varinfo(a1, a1) β‰ˆ 0.0 atol=1.0e-12 @test varinfo(a2, a2) β‰ˆ 0.0 atol=1.0e-12 v = varinfo(a1, a2) v_ = varinfo(a2, a1) @test 0.0 < v < log(3) @test v β‰ˆ v_ a1 = [1, 2, 3, 4, 5] a2 = [1, 1, 1, 1, 1] @test varinfo(a1, a2) β‰ˆ log(5) @test varinfo(a2, a1) β‰ˆ log(5) a1 = [1, 1, 1, 2, 2, 2] a2 = [2, 2, 2, 1, 1, 1] @test varinfo(a1, a2) β‰ˆ 0.0 atol=1.0e-12 end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
code
1893
using Test using Clustering @testset "V-measure" begin @testset "reproducing fig.2" begin # Tests are taken from the fig. 2 of the referenced paper: # V-Measure: A conditional entropy-based external cluster evaluation measure, # Andrew Rosenberg and Julia Hirschberg clus = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3] v = vmeasure(clus, clus) @test v == 1.0 clas = [1, 1, 1, 2, 3, 3, 3, 3, 1, 2, 2, 2, 2, 1, 3] v = vmeasure(clas, clus) @test v β‰ˆ 0.14 atol=1e-2 clas = [1, 1, 1, 2, 2, 3, 3, 3, 1, 1, 2, 2, 2, 3, 3] v = vmeasure(clas, clus) @test v β‰ˆ 0.39 atol=1e-2 clus = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 5, 5, 6, 6] clas = [1, 1, 1, 2, 2, 3, 3, 3, 1, 1, 2, 2, 2, 3, 3, 1, 2, 3, 1, 2, 3] v = vmeasure(clas, clus) @test v β‰ˆ 0.30 atol=1e-2 clus = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 5, 6, 7, 8, 9] v = vmeasure(clas, clus) @test v β‰ˆ 0.41 atol=1e-2 @test_throws ArgumentError vmeasure(clas, clus, Ξ² = -1.0) end @testset "comparing 2 k-means clusterings" begin Random.seed!(34568) # set random seed for RNG used by kmeans() rng = StableRNG(34568) m = 3 n = 1000 k = 10 # non-weighted v = mean([begin x = rand(rng, m, n) vmeasure(kmeans(x, k; maxiter=50), kmeans(x, k; maxiter=50)) end for _ in 1:200]) @test 0.5 < v < 1.0 @test v β‰ˆ 0.75 atol=1e-2 # FIXME why 0.75? end @testset "comparing 2 random label assignments" begin rng = StableRNG(34568) k = 10 n = 10000 a1 = rand(rng, 1:k, n) a2 = rand(rng, 1:k, n) v = vmeasure(a1, a2) @test v β‰ˆ 0.0 atol=1e-2 # should be close to zero end end
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
docs
1705
# Clustering.jl Methods for data clustering and evaluation of clustering quality. [![Build Status](https://github.com/JuliaStats/Clustering.jl/workflows/CI/badge.svg)](https://github.com/JuliaStats/Clustering.jl/actions?query=workflow%3ACI+branch%3Amaster) [![codecov](https://codecov.io/gh/JuliaStats/Clustering.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/JuliaStats/Clustering.jl) **Documentation**: [![][docs-stable-img]][docs-stable-url] [![][docs-dev-img]][docs-dev-url] ## Installation ```julia Pkg.add("Clustering") ``` ## Features ### Clustering Algorithms - K-means - K-medoids - Affinity Propagation - Density-based spatial clustering of applications with noise (DBSCAN) - Markov Clustering Algorithm (MCL) - Fuzzy C-Means Clustering - Hierarchical Clustering - Single Linkage - Average Linkage - Complete Linkage - Ward's Linkage ### Clustering Validation - Silhouettes - Variation of Information - Rand index - V-Measure ## See Also Julia packages providing other clustering methods and performance evaluation: - [QuickShiftClustering.jl](https://github.com/rened/QuickShiftClustering.jl) - [SpectralClustering.jl](https://github.com/lucianolorenti/SpectralClustering.jl) - [ClusteringBenchmarks.jl](https://github.com/HolyLab/ClusteringBenchmarks.jl) [docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg [docs-dev-url]: http://JuliaStats.github.io/Clustering.jl/dev/ [docs-latest-img]: https://img.shields.io/badge/docs-latest-blue.svg [docs-latest-url]: http://JuliaStats.github.io/Clustering.jl/latest/ [docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg [docs-stable-url]: http://JuliaStats.github.io/Clustering.jl/stable/
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
docs
429
# Affinity Propagation [Affinity propagation](http://en.wikipedia.org/wiki/Affinity_propagation) is a clustering algorithm based on *message passing* between data points. Similar to [K-medoids](@ref), it looks at the (dis)similarities in the data, picks one *exemplar* data point for each cluster, and assigns every point in the data set to the cluster with the closest *exemplar*. ```@docs affinityprop AffinityPropResult ```
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
docs
2076
# [Basics](@id clu_algo_basics) The package implements a variety of clustering algorithms: ```@contents Pages = ["kmeans.md", "kmedoids.md", "hclust.md", "mcl.md", "affprop.md", "dbscan.md", "fuzzycmeans.md"] ``` Most of the clustering functions in the package have a similar interface, making it easy to switch between different clustering algorithms. ## Inputs A clustering algorithm, depending on its nature, may accept an input matrix in either of the following forms: - Data matrix ``X`` of size ``d \times n``, the ``i``-th column of ``X`` (`X[:, i]`) is a data point (data *sample*) in ``d``-dimensional space. - Distance matrix ``D`` of size ``n \times n``, where ``D_{ij}`` is the distance between the ``i``-th and ``j``-th points, or the cost of assigning them to the same cluster. ## [Common Options](@id common_options) Many clustering algorithms are iterative procedures. The functions share the basic options for controlling the iterations: - `maxiter::Integer`: maximum number of iterations. - `tol::Real`: minimal allowed change of the objective during convergence. The algorithm is considered to be converged when the change of objective value between consecutive iterations drops below `tol`. - `display::Symbol`: the level of information to be displayed. It may take one of the following values: * `:none`: nothing is shown * `:final`: only shows a brief summary when the algorithm ends * `:iter`: shows the progress at each iteration ## Results A clustering function would return an object (typically, an instance of some [`ClusteringResult`](@ref) subtype) that contains both the resulting clustering (e.g. assignments of points to the clusters) and the information about the clustering algorithm (e.g. the number of iterations and whether it converged). ```@docs ClusteringResult ``` The following generic methods are supported by any subtype of `ClusteringResult`: ```@docs nclusters(::ClusteringResult) counts(::ClusteringResult) wcounts(::ClusteringResult) assignments(::ClusteringResult) ```
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
docs
2107
# DBSCAN [Density-based Spatial Clustering of Applications with Noise (DBSCAN)](http://en.wikipedia.org/wiki/DBSCAN) is a data clustering algorithm that finds clusters through density-based expansion of seed points. The algorithm was proposed in: > Martin Ester, Hans-peter Kriegel, JΓΆrg S, and Xiaowei Xu *A > density-based algorithm for discovering clusters in large spatial > databases with noise.* 1996. ## Density Reachability DBSCAN's definition of a cluster is based on the concept of *density reachability*: a point ``q`` is said to be *directly density reachable* by another point ``p`` if the distance between them is below a specified threshold ``\epsilon`` and ``p`` is surrounded by sufficiently many points. Then, ``q`` is considered to be *density reachable* by ``p`` if there exists a sequence ``p_1, p_2, \ldots, p_n`` such that ``p_1 = p`` and ``p_{i+1}`` is directly density reachable from ``p_i``. The points within DBSCAN clusters are categorized into *core* (or *seeds*) and *boundary*: 1. All points of the cluster *core* are mutually *density-connected*, meaning that for any two distinct points ``p`` and ``q`` in a core, there exists a point ``o`` such that both ``p`` and ``q`` are *density reachable* from ``o``. 2. If a point is *density-connected* to any point of a cluster core, it is also part of the core. 3. All points within the ``\epsilon``-neighborhood of any core point, but not belonging to that core (i.e. not *density reachable* from the core), are considered cluster *boundary*. ## Interface The implementation of *DBSCAN* algorithm provided by [`dbscan`](@ref) function supports the two ways of specifying clustering data: - The ``d \times n`` matrix of point coordinates. This is the preferred method as it uses memory- and time-efficient neighboring points queries via [NearestNeighbors.jl](https://github.com/KristofferC/NearestNeighbors.jl) package. - The ``n\times n`` matrix of precalculated pairwise point distances. It requires ``O(n^2)`` memory and time to run. ```@docs dbscan DbscanResult DbscanCluster ```
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
docs
1527
# [Fuzzy C-means](@id fuzzy_cmeans_def) [Fuzzy C-means](https://en.wikipedia.org/wiki/Fuzzy_clustering#Fuzzy_C-means_clustering) is a clustering method that provides cluster membership weights instead of "hard" classification (e.g. K-means). From a mathematical standpoint, fuzzy C-means solves the following optimization problem: ```math \arg\min_\mathcal{C} \ \sum_{i=1}^n \sum_{j=1}^C w_{ij}^\mu \| \mathbf{x}_i - \mathbf{c}_j \|^2, \ \text{where}\ w_{ij} = \left(\sum_{k=1}^{C} \left(\frac{\left\|\mathbf{x}_i - \mathbf{c}_j \right\|}{\left\|\mathbf{x}_i - \mathbf{c}_k \right\|}\right)^{\frac{2}{\mu-1}}\right)^{-1} ``` Here, ``\mathbf{c}_j`` is the center of the ``j``-th cluster, ``w_{ij}`` is the membership weight of the ``i``-th point in the ``j``-th cluster, and ``\mu > 1`` is a user-defined fuzziness parameter. ```@docs fuzzy_cmeans FuzzyCMeansResult wcounts ``` ## Examples ```@example using Clustering # make a random dataset with 1000 points # each point is a 5-dimensional vector X = rand(5, 1000) # performs Fuzzy C-means over X, trying to group them into 3 clusters # with a fuzziness factor of 2. Set maximum number of iterations to 200 # set display to :iter, so it shows progressive info at each iteration R = fuzzy_cmeans(X, 3, 2, maxiter=200, display=:iter) # get the centers (i.e. weighted mean vectors) # M is a 5x3 matrix # M[:, k] is the center of the k-th cluster M = R.centers # get the point memberships over all the clusters # memberships is a 20x3 matrix memberships = R.weights ```
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
docs
705
# Hierarchical Clustering [Hierarchical clustering](https://en.wikipedia.org/wiki/Hierarchical_clustering) algorithms build a dendrogram of nested clusters by repeatedly merging or splitting clusters. The `hclust` function implements several classical algorithms for hierarchical clustering (the algorithm to use is defined by the `linkage` parameter): ```@docs hclust Hclust ``` Single-linkage clustering using distance matrix: ```@example using Clustering D = rand(1000, 1000); D += D'; # symmetric distance matrix (optional) result = hclust(D, linkage=:single) ``` The resulting dendrogram could be converted into disjoint clusters with the help of [`cutree`](@ref) function. ```@docs cutree ```
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
docs
422
# Clustering.jl package *Clustering.jl* is a Julia package for data clustering. It covers the two aspects of data clustering: - [Clustering Algorithms](@ref clu_algo_basics): K-means, K-medoids, Affinity propagation, DBSCAN etc. - [Clustering Comparison & Evaluation](@ref clu_validate): cross-tabulation, variational and mutual information, intrinsic clustering quality indices, such as *silhouettes*, etc.
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
docs
962
# [Initialization](@id clu_algo_init) A clustering algorithm usually requires initialization before it could be started. ## Seeding *Seeding* is a type of clustering initialization, which provides a few *seeds* -- points from a data set that would serve as the initial cluster centers (one for each cluster). Each seeding algorithm implemented by *Clustering.jl* is a subtype of `SeedingAlgorithm`: ```@docs SeedingAlgorithm initseeds! initseeds_by_costs! ``` There are several seeding methods described in the literature. *Clustering.jl* implements three popular ones: ```@docs KmppAlg KmCentralityAlg RandSeedAlg ``` In practice, we have found that *Kmeans++* is the most effective choice. For convenience, the package defines the two wrapper functions that accept the short name of the seeding algorithm and the number of clusters and take care of allocating `iseeds` and applying the proper `SeedingAlgorithm`: ```@docs initseeds initseeds_by_costs ```
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
docs
1725
# K-means [K-means](http://en.wikipedia.org/wiki/K_means) is a classical method for clustering or vector quantization. It produces a fixed number of clusters, each associated with a *center* (also known as a *prototype*), and each data point is assigned to a cluster with the nearest center. From a mathematical standpoint, K-means is a coordinate descent algorithm that solves the following optimization problem: ```math \text{minimize} \ \sum_{i=1}^n \| \mathbf{x}_i - \boldsymbol{\mu}_{z_i} \|^2 \ \text{w.r.t.} \ (\boldsymbol{\mu}, z) ``` Here, ``\boldsymbol{\mu}_k`` is the center of the ``k``-th cluster, and ``z_i`` is an index of the cluster for ``i``-th point ``\mathbf{x}_i``. ```@docs kmeans KmeansResult ``` If you already have a set of initial center vectors, [`kmeans!`](@ref) could be used: ```@docs kmeans! ``` ## Examples ```@example using Clustering # make a random dataset with 1000 random 5-dimensional points X = rand(5, 1000) # cluster X into 20 clusters using K-means R = kmeans(X, 20; maxiter=200, display=:iter) @assert nclusters(R) == 20 # verify the number of clusters a = assignments(R) # get the assignments of points to clusters c = counts(R) # get the cluster sizes M = R.centers # get the cluster centers ``` Scatter plot of the K-means clustering results: ```@example using RDatasets, Clustering, Plots iris = dataset("datasets", "iris"); # load the data features = collect(Matrix(iris[:, 1:4])'); # features to use for clustering result = kmeans(features, 3); # run K-means for the 3 clusters # plot with the point color mapped to the assigned cluster index scatter(iris.PetalLength, iris.PetalWidth, marker_z=result.assignments, color=:lightrainbow, legend=false) ```
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
docs
777
# K-medoids [K-medoids](http://en.wikipedia.org/wiki/K-medoids) is a clustering algorithm that works by finding ``k`` data points (called *medoids*) such that the total distance between each data point and the closest *medoid* is minimal. ```@docs kmedoids kmedoids! KmedoidsResult ``` ## [References](@id kmedoid_refs) 1. Teitz, M.B. and Bart, P. (1968). *Heuristic Methods for Estimating the Generalized Vertex Median of a Weighted Graph*. Operations Research, 16(5), 955–961. [doi:10.1287/opre.16.5.955](https://doi.org/10.1287/opre.16.5.955) 2. Schubert, E. and Rousseeuw, P.J. (2019). *Faster k-medoids clustering: Improving the PAM, CLARA, and CLARANS Algorithms*. SISAP, 171-187. [doi:10.1007/978-3-030-32047-8_16](https://doi.org/10.1007/978-3-030-32047-8_16)
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
docs
413
# MCL (Markov Cluster Algorithm) [Markov Cluster Algorithm](http://micans.org/mcl) works by simulating a stochastic (Markov) flow in a weighted graph, where each node is a data point, and the edge weights are defined by the adjacency matrix. ... When the algorithm converges, it produces the new edge weights that define the new connected components of the graph (i.e. the clusters). ```@docs mcl MCLResult ```
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
0.15.7
9ebb045901e9bbf58767a9f34ff89831ed711aae
docs
10551
# [Evaluation & Validation](@id clu_validate) *Clustering.jl* package provides a number of methods to compare different clusterings, evaluate clustering quality or validate its correctness. ## Clustering comparison Methods to compare two clusterings and measure their similarity. ### Cross tabulation [Cross tabulation](https://en.wikipedia.org/wiki/Contingency_table), or *contingency matrix*, is a basis for many clustering quality measures. It shows how similar are the two clusterings on a cluster level. *Clustering.jl* extends `StatsBase.counts()` with methods that accept [`ClusteringResult`](@ref) arguments: ```@docs counts(::ClusteringResult, ::ClusteringResult) ``` ### Confusion matrix [Confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix) for the two clusterings is a 2Γ—2 contingency table that counts how frequently the pair of data points are in the same or different clusters. ```@docs confusion ``` ### Rand index [Rand index](http://en.wikipedia.org/wiki/Rand_index) is a measure of the similarity between the two data clusterings. From a mathematical standpoint, Rand index is related to the prediction accuracy, but is applicable even when the original class labels are not used. ```@docs randindex ``` ### Variation of Information [Variation of information](http://en.wikipedia.org/wiki/Variation_of_information) (also known as *shared information distance*) is a measure of the distance between the two clusterings. It is devised from the *mutual information*, but it is a true metric, *i.e.* it is symmetric and satisfies the triangle inequality. ```@docs Clustering.varinfo ``` ### V-measure *V*-measure can be used to compare the clustering results with the existing class labels of data points or with the alternative clustering. It is defined as the harmonic mean of homogeneity (``h``) and completeness (``c``) of the clustering: ```math V_{\beta} = (1+\beta)\frac{h \cdot c}{\beta \cdot h + c}. ``` Both ``h`` and ``c`` can be expressed in terms of the mutual information and entropy measures from the information theory. Homogeneity (``h``) is maximized when each cluster contains elements of as few different classes as possible. Completeness (``c``) aims to put all elements of each class in single clusters. The ``\beta`` parameter (``\beta > 0``) could used to control the weights of ``h`` and ``c`` in the final measure. If ``\beta > 1``, *completeness* has more weight, and when ``\beta < 1`` it's *homogeneity*. ```@docs vmeasure ``` ### Mutual information [Mutual information](https://en.wikipedia.org/wiki/Mutual_information) quantifies the "amount of information" obtained about one random variable through observing the other random variable. It is used in determining the similarity of two different clusterings of a dataset. ```@docs mutualinfo ``` ## Clustering quality indices [`clustering_quality()`][@ref clustering_quality] methods allow computing *intrinsic* clustering quality indices, i.e. the metrics that depend only on the clustering itself and do not use the external knowledge. These metrics can be used to compare different clustering algorithms or choose the optimal number of clusters. | **quality index** | **`quality_index` option** | **clustering type** | **better quality** | **cluster centers** | |:-------------------------------------------:|:--------------------:|:----------:|:-------------:|:-------------------:| | [Calinski-Harabasz](@ref calinsky_harabasz) | `:calinsky_harabasz` | hard/fuzzy | *higher* values | required | | [Xie-Beni](@ref xie_beni) | `:xie_beni` | hard/fuzzy | *lower* values | required | | [Davis-Bouldin](@ref davis_bouldin) | `:davis_bouldin` | hard | *lower* values | required | | [Dunn](@ref dunn) | `:dunn` | hard | *higher* values | not required | | [silhouettes](@ref silhouettes_index) | `:silhouettes` | hard | *higher* values | not required | ```@docs clustering_quality ``` The clustering quality index definitions use the following notation: - ``x_1, x_2, \ldots, x_n``: data points, - ``C_1, C_2, \ldots, C_k``: clusters, - ``c_j`` and ``c``: cluster centers and global dataset center, - ``d``: a similarity (distance) function, - ``w_{ij}``: weights measuring membership of a point ``x_i`` to a cluster ``C_j``, - ``\alpha``: a fuzziness parameter. ### [Calinski-Harabasz index](@id calinsky_harabasz) [*Calinski-Harabasz* index](https://en.wikipedia.org/wiki/Calinski%E2%80%93Harabasz_index) (option `:calinski_harabasz`) measures corrected ratio between global inertia of the cluster centers and the summed internal inertias of clusters: ```math \frac{n-k}{k-1}\frac{\sum_{C_j}|C_j|d(c_j,c)}{\sum\limits_{C_j}\sum\limits_{x_i\in C_j} d(x_i,c_j)} \quad \text{and}\quad \frac{n-k}{k-1} \frac{\sum\limits_{C_j}\left(\sum\limits_{x_i}w_{ij}^\alpha\right) d(c_j,c)}{\sum_{C_j} \sum_{x_i} w_{ij}^\alpha d(x_i,c_j)} ``` for hard and fuzzy (soft) clusterings, respectively. *Higher* values indicate better quality. ### [Xie-Beni index](@id xie_beni) *Xie-Beni* index (option `:xie_beni`) measures ratio between summed inertia of clusters and the minimum distance between cluster centres: ```math \frac{\sum_{C_j}\sum_{x_i\in C_j}d(x_i,c_j)}{n\min\limits_{c_{j_1}\neq c_{j_2}} d(c_{j_1},c_{j_2}) } \quad \text{and}\quad \frac{\sum_{C_j}\sum_{x_i} w_{ij}^\alpha d(x_i,c_j)}{n\min\limits_{c_{j_1}\neq c_{j_2}} d(c_{j_1},c_{j_2}) } ``` for hard and fuzzy (soft) clusterings, respectively. *Lower* values indicate better quality. ### [Davis-Bouldin index](@id davis_bouldin) [*Davis-Bouldin* index](https://en.wikipedia.org/wiki/Davies%E2%80%93Bouldin_index) (option `:davis_bouldin`) measures average cohesion based on the cluster diameters and distances between cluster centers: ```math \frac{1}{k}\sum_{C_{j_1}}\max_{c_{j_2}\neq c_{j_1}}\frac{S(C_{j_1})+S(C_{j_2})}{d(c_{j_1},c_{j_2})} ``` where ```math S(C_j) = \frac{1}{|C_j|}\sum_{x_i\in C_j}d(x_i,c_j). ``` *Lower* values indicate better quality. ### [Dunn index](@id dunn) [*Dunn* index](https://en.wikipedia.org/wiki/Dunn_index) (option `:dunn`) measures the ratio between the nearest neighbour distance divided by the maximum cluster diameter: ```math \frac{\min\limits_{ C_{j_1}\neq C_{j_2}} \mathrm{dist}(C_{j_1},C_{j_2})}{\max\limits_{C_j}\mathrm{diam}(C_j)} ``` where ```math \mathrm{dist}(C_{j_1},C_{j_2}) = \min\limits_{x_{i_1}\in C_{j_1},x_{i_2}\in C_{j_2}} d(x_{i_1},x_{i_2}),\quad \mathrm{diam}(C_j) = \max\limits_{x_{i_1},x_{i_2}\in C_j} d(x_{i_1},x_{i_2}). ``` It is more computationally demanding quality index, which can be used when the centres are not known. *Higher* values indicate better quality. ### [Silhouettes](@id silhouettes_index) [*Silhouettes* metric](http://en.wikipedia.org/wiki/Silhouette_(clustering)) quantifies the correctness of point-to-cluster asssignment by comparing the distance of the point to its cluster and to the other clusters. The *Silhouette* value for the ``i``-th data point is: ```math s_i = \frac{b_i - a_i}{\max(a_i, b_i)}, \ \text{where} ``` - ``a_i`` is the average distance from the ``i``-th point to the other points in the *same* cluster ``z_i``, - ``b_i ≝ \min_{k \ne z_i} b_{ik}``, where ``b_{ik}`` is the average distance from the ``i``-th point to the points in the ``k``-th cluster. Note that ``s_i \le 1``, and that ``s_i`` is close to ``1`` when the ``i``-th point lies well within its own cluster. This property allows using average silhouette value `mean(silhouettes(assignments, counts, X))` as a measure of clustering quality; it is also available using [`clustering_quality(...; quality_index = :silhouettes)`](@ref clustering_quality) method. Higher values indicate better separation of clusters w.r.t. point distances. ```@docs silhouettes ``` [`clustering_quality(..., quality_index=:silhouettes)`][@ref clustering_quality] provides mean silhouette metric for the datapoints. Higher values indicate better quality. ## References > Olatz Arbelaitz *et al.* (2013). *An extensive comparative study of cluster validity indices*. Pattern Recognition. 46 1: 243-256. [doi:10.1016/j.patcog.2012.07.021](https://doi.org/10.1016/j.patcog.2012.07.021) > AybΓΌkΓ« OztΓΌrk, StΓ©phane Lallich, JΓ©rΓ΄me Darmont. (2018). *A Visual Quality Index for Fuzzy C-Means*. 14th International Conference on Artificial Intelligence Applications and Innovations (AIAI 2018). 546-555. [doi:10.1007/978-3-319-92007-8_46](https://doi.org/10.1007/978-3-319-92007-8_46). ### Examples Exemplary data with 3 real clusters. ```@example using Plots, Clustering X = hcat([4., 5.] .+ 0.4 * randn(2, 10), [9., -5.] .+ 0.4 * randn(2, 5), [-4., -9.] .+ 1 * randn(2, 5)) scatter(view(X, 1, :), view(X, 2, :), label = "data points", xlabel = "x", ylabel = "y", legend = :right, ) ``` Hard clustering quality for K-means method with 2 to 5 clusters: ```@example using Plots, Clustering X = hcat([4., 5.] .+ 0.4 * randn(2, 10), [9., -5.] .+ 0.4 * randn(2, 5), [-4., -9.] .+ 1 * randn(2, 5)) nclusters = 2:5 clusterings = kmeans.(Ref(X), nclusters) plot(( plot(nclusters, clustering_quality.(Ref(X), clusterings, quality_index = qidx), marker = :circle, title = ":$qidx", label = nothing, ) for qidx in [:silhouettes, :calinski_harabasz, :xie_beni, :davies_bouldin, :dunn])..., layout = (3, 2), xaxis = "N clusters", plot_title = "\"Hard\" clustering quality indices" ) ``` Fuzzy clustering quality for fuzzy C-means method with 2 to 5 clusters: ```@example using Plots, Clustering X = hcat([4., 5.] .+ 0.4 * randn(2, 10), [9., -5.] .+ 0.4 * randn(2, 5), [-4., -9.] .+ 1 * randn(2, 5)) fuzziness = 2 fuzzy_nclusters = 2:5 fuzzy_clusterings = fuzzy_cmeans.(Ref(X), fuzzy_nclusters, fuzziness) plot(( plot(fuzzy_nclusters, clustering_quality.(Ref(X), fuzzy_clusterings, fuzziness = fuzziness, quality_index = qidx), marker = :circle, title = ":$qidx", label = nothing, ) for qidx in [:calinski_harabasz, :xie_beni])..., layout = (2, 1), xaxis = "N clusters", plot_title = "\"Soft\" clustering quality indices" ) ``` ## Other packages * [ClusteringBenchmarks.jl](https://github.com/HolyLab/ClusteringBenchmarks.jl) provides benchmark datasets and implements additional methods for evaluating clustering performance.
Clustering
https://github.com/JuliaStats/Clustering.jl.git
[ "MIT" ]
1.1.1
81a321298aed95631447a1f3afc2ea83682d44a4
code
30955
# Copyright (c) 2013: Steven G. Johnson and contributors # # Use of this source code is governed by an MIT-style license that can be found # in the LICENSE.md file or at https://opensource.org/licenses/MIT. module NLoptMathOptInterfaceExt import MathOptInterface as MOI import NLopt function __init__() # we need to add extension types back to the toplevel module @static if VERSION >= v"1.9" setglobal!(NLopt, :Optimizer, Optimizer) end return end mutable struct _ConstraintInfo{F,S} func::F set::S end """ Optimizer() Create a new Optimizer object. """ mutable struct Optimizer <: MOI.AbstractOptimizer inner::Union{NLopt.Opt,Nothing} variables::MOI.Utilities.VariablesContainer{Float64} starting_values::Vector{Union{Nothing,Float64}} nlp_data::MOI.NLPBlockData nlp_model::Union{Nothing,MOI.Nonlinear.Model} ad_backend::MOI.Nonlinear.AbstractAutomaticDifferentiation sense::Union{Nothing,MOI.OptimizationSense} objective::Union{ MOI.VariableIndex, MOI.ScalarAffineFunction{Float64}, MOI.ScalarQuadraticFunction{Float64}, Nothing, } linear_le_constraints::Vector{ _ConstraintInfo{ MOI.ScalarAffineFunction{Float64}, MOI.LessThan{Float64}, }, } linear_eq_constraints::Vector{ _ConstraintInfo{MOI.ScalarAffineFunction{Float64},MOI.EqualTo{Float64}}, } quadratic_le_constraints::Vector{ _ConstraintInfo{ MOI.ScalarQuadraticFunction{Float64}, MOI.LessThan{Float64}, }, } quadratic_eq_constraints::Vector{ _ConstraintInfo{ MOI.ScalarQuadraticFunction{Float64}, MOI.EqualTo{Float64}, }, } # Parameters. silent::Bool options::Dict{String,Any} # Solution attributes. objective_value::Float64 solution::Vector{Float64} status::Symbol solve_time::Float64 function Optimizer() return new( nothing, MOI.Utilities.VariablesContainer{Float64}(), Union{Nothing,Float64}[], MOI.NLPBlockData([], _EmptyNLPEvaluator(), false), nothing, MOI.Nonlinear.SparseReverseMode(), nothing, nothing, _ConstraintInfo{ MOI.ScalarAffineFunction{Float64}, MOI.LessThan{Float64}, }[], _ConstraintInfo{ MOI.ScalarAffineFunction{Float64}, MOI.EqualTo{Float64}, }[], _ConstraintInfo{ MOI.ScalarQuadraticFunction{Float64}, MOI.LessThan{Float64}, }[], _ConstraintInfo{ MOI.ScalarQuadraticFunction{Float64}, MOI.EqualTo{Float64}, }[], false, copy(_DEFAULT_OPTIONS), NaN, Float64[], :NOT_CALLED, NaN, ) end end struct _EmptyNLPEvaluator <: MOI.AbstractNLPEvaluator end MOI.initialize(::_EmptyNLPEvaluator, ::Vector{Symbol}) = nothing MOI.eval_constraint(::_EmptyNLPEvaluator, g, x) = nothing MOI.eval_constraint_jacobian(::_EmptyNLPEvaluator, J, x) = nothing function MOI.empty!(model::Optimizer) model.inner = nothing MOI.empty!(model.variables) empty!(model.starting_values) model.nlp_data = MOI.NLPBlockData([], _EmptyNLPEvaluator(), false) model.nlp_model = nothing model.sense = nothing model.objective = nothing empty!(model.linear_le_constraints) empty!(model.linear_eq_constraints) empty!(model.quadratic_le_constraints) empty!(model.quadratic_eq_constraints) model.status = :NOT_CALLED return end function MOI.is_empty(model::Optimizer) return MOI.is_empty(model.variables) && isempty(model.starting_values) && model.nlp_data.evaluator isa _EmptyNLPEvaluator && model.nlp_model === nothing && model.sense == nothing && isempty(model.linear_le_constraints) && isempty(model.linear_eq_constraints) && isempty(model.quadratic_le_constraints) && isempty(model.quadratic_eq_constraints) end function MOI.get(model::Optimizer, ::MOI.ListOfModelAttributesSet) ret = MOI.AbstractModelAttribute[] if model.sense !== nothing push!(ret, MOI.ObjectiveSense()) end if model.objective !== nothing F = MOI.get(model, MOI.ObjectiveFunctionType()) push!(ret, MOI.ObjectiveFunction{F}()) end return ret end MOI.supports_incremental_interface(::Optimizer) = true function MOI.copy_to(model::Optimizer, src::MOI.ModelLike) return MOI.Utilities.default_copy_to(model, src) end MOI.get(::Optimizer, ::MOI.SolverName) = "NLopt" MOI.get(::Optimizer, ::MOI.SolverVersion) = "$(NLopt.version())" function _constraints( model, ::Type{<:MOI.ScalarAffineFunction}, ::Type{<:MOI.LessThan}, ) return model.linear_le_constraints end function _constraints( model, ::Type{<:MOI.ScalarAffineFunction}, ::Type{<:MOI.EqualTo}, ) return model.linear_eq_constraints end function _constraints( model, ::Type{<:MOI.ScalarQuadraticFunction}, ::Type{<:MOI.LessThan}, ) return model.quadratic_le_constraints end function _constraints( model, ::Type{<:MOI.ScalarQuadraticFunction}, ::Type{<:MOI.EqualTo}, ) return model.quadratic_eq_constraints end function MOI.supports_constraint( ::Optimizer, ::Type{ <:Union{ MOI.ScalarAffineFunction{Float64}, MOI.ScalarQuadraticFunction{Float64}, }, }, ::Type{<:Union{MOI.LessThan{Float64},MOI.EqualTo{Float64}}}, ) return true end function MOI.get( model::Optimizer, ::MOI.NumberOfConstraints{F,S}, ) where { F<:Union{ MOI.ScalarAffineFunction{Float64}, MOI.ScalarQuadraticFunction{Float64}, }, S<:Union{MOI.LessThan{Float64},MOI.EqualTo{Float64}}, } return length(_constraints(model, F, S)) end function MOI.get(model::Optimizer, attr::MOI.ListOfConstraintTypesPresent) constraints = MOI.get(model.variables, attr) function _check(model, F, S) if !isempty(_constraints(model, F, S)) push!(constraints, (F, S)) end end _check(model, MOI.ScalarAffineFunction{Float64}, MOI.LessThan{Float64}) _check(model, MOI.ScalarAffineFunction{Float64}, MOI.EqualTo{Float64}) _check(model, MOI.ScalarQuadraticFunction{Float64}, MOI.LessThan{Float64}) _check(model, MOI.ScalarQuadraticFunction{Float64}, MOI.EqualTo{Float64}) return constraints end function MOI.get( model::Optimizer, ::MOI.ListOfConstraintIndices{F,S}, ) where { F<:Union{ MOI.ScalarAffineFunction{Float64}, MOI.ScalarQuadraticFunction{Float64}, }, S<:Union{MOI.LessThan{Float64},MOI.EqualTo{Float64}}, } return MOI.ConstraintIndex{F,S}.(eachindex(_constraints(model, F, S))) end function MOI.get( model::Optimizer, ::MOI.ConstraintFunction, c::MOI.ConstraintIndex{F,S}, ) where { F<:Union{ MOI.ScalarAffineFunction{Float64}, MOI.ScalarQuadraticFunction{Float64}, }, S<:Union{MOI.LessThan{Float64},MOI.EqualTo{Float64}}, } return copy(_constraints(model, F, S)[c.value].func) end function MOI.get( model::Optimizer, ::MOI.ConstraintSet, c::MOI.ConstraintIndex{F,S}, ) where { F<:Union{ MOI.ScalarAffineFunction{Float64}, MOI.ScalarQuadraticFunction{Float64}, }, S<:Union{MOI.LessThan{Float64},MOI.EqualTo{Float64}}, } return _constraints(model, F, S)[c.value].set end # ObjectiveSense MOI.supports(::Optimizer, ::MOI.ObjectiveSense) = true function MOI.set( model::Optimizer, ::MOI.ObjectiveSense, sense::MOI.OptimizationSense, ) model.sense = sense return end function MOI.get(model::Optimizer, ::MOI.ObjectiveSense) return something(model.sense, MOI.FEASIBILITY_SENSE) end # MOI.Silent MOI.supports(::Optimizer, ::MOI.Silent) = true function MOI.set(model::Optimizer, ::MOI.Silent, value::Bool) model.silent = value return end MOI.get(model::Optimizer, ::MOI.Silent) = model.silent # MOI.TimeLimitSec MOI.supports(::Optimizer, ::MOI.TimeLimitSec) = true function MOI.set(model::Optimizer, ::MOI.TimeLimitSec, value::Real) MOI.set(model, MOI.RawOptimizerAttribute("max_cpu_time"), Float64(value)) return end function MOI.set(model::Optimizer, ::MOI.TimeLimitSec, ::Nothing) delete!(model.options, "max_cpu_time") return end function MOI.get(model::Optimizer, ::MOI.TimeLimitSec) return get(model.options, "max_cpu_time", nothing) end # MOI.RawOptimizerAttribute const _DEFAULT_OPTIONS = Dict{String,Any}( "algorithm" => :none, "stopval" => NaN, "ftol_rel" => 1e-7, "ftol_abs" => NaN, "xtol_rel" => 1e-7, "xtol_abs" => nothing, "constrtol_abs" => 1e-7, "maxeval" => 0, "maxtime" => 0.0, "initial_step" => nothing, "population" => 0, "seed" => nothing, "vector_storage" => 0, "local_optimizer" => nothing, ) function MOI.supports(::Optimizer, p::MOI.RawOptimizerAttribute) # TODO(odow): this ignores other algorithm-specific parameters? return haskey(_DEFAULT_OPTIONS, p.name) end function MOI.set(model::Optimizer, p::MOI.RawOptimizerAttribute, value) model.options[p.name] = value return end function MOI.get(model::Optimizer, p::MOI.RawOptimizerAttribute) if !haskey(model.options, p.name) msg = "RawOptimizerAttribute with name $(p.name) is not set." throw(MOI.GetAttributeNotAllowed(p, msg)) end return model.options[p.name] end # Variables function MOI.get(model::Optimizer, ::MOI.ListOfVariableAttributesSet) ret = MOI.AbstractVariableAttribute[] if any(!isnothing, model.starting_values) push!(ret, MOI.VariablePrimalStart()) end return ret end function MOI.add_variable(model::Optimizer) push!(model.starting_values, nothing) return MOI.add_variable(model.variables) end function MOI.supports_constraint( ::Optimizer, ::Type{MOI.VariableIndex}, ::Type{ <:Union{ MOI.LessThan{Float64}, MOI.GreaterThan{Float64}, MOI.EqualTo{Float64}, MOI.Interval{Float64}, }, }, ) return true end function MOI.get( model::Optimizer, attr::Union{ MOI.NumberOfVariables, MOI.ListOfVariableIndices, MOI.NumberOfConstraints{MOI.VariableIndex}, MOI.ListOfConstraintIndices{MOI.VariableIndex}, }, ) return MOI.get(model.variables, attr) end function MOI.get( model::Optimizer, attr::Union{MOI.ConstraintFunction,MOI.ConstraintSet}, ci::MOI.ConstraintIndex{MOI.VariableIndex}, ) return MOI.get(model.variables, attr, ci) end function MOI.is_valid( model::Optimizer, index::Union{MOI.VariableIndex,MOI.ConstraintIndex{MOI.VariableIndex}}, ) return MOI.is_valid(model.variables, index) end function MOI.add_constraint( model::Optimizer, vi::MOI.VariableIndex, set::Union{ MOI.LessThan{Float64}, MOI.GreaterThan{Float64}, MOI.EqualTo{Float64}, MOI.Interval{Float64}, }, ) return MOI.add_constraint(model.variables, vi, set) end function MOI.set( model::Optimizer, attr::MOI.ConstraintSet, ci::MOI.ConstraintIndex{MOI.VariableIndex,S}, set::S, ) where {S} return MOI.set(model.variables, attr, ci, set) end function MOI.delete( model::Optimizer, ci::MOI.ConstraintIndex{MOI.VariableIndex}, ) return MOI.delete(model.variables, ci) end # constraints function MOI.get(::Optimizer, ::MOI.ListOfConstraintAttributesSet) return MOI.AbstractConstraintAttribute[] end function MOI.is_valid( model::Optimizer, ci::MOI.ConstraintIndex{F,S}, ) where { F<:Union{ MOI.ScalarAffineFunction{Float64}, MOI.ScalarQuadraticFunction{Float64}, }, S<:Union{MOI.LessThan{Float64},MOI.EqualTo{Float64}}, } return 1 <= ci.value <= length(_constraints(model, F, S)) end function _check_inbounds(model, f::MOI.VariableIndex) return MOI.throw_if_not_valid(model, f) end function _check_inbounds(model, f::MOI.ScalarAffineFunction{Float64}) for term in f.terms MOI.throw_if_not_valid(model, term.variable) end return end function _check_inbounds(model, f::MOI.ScalarQuadraticFunction{Float64}) for term in f.affine_terms MOI.throw_if_not_valid(model, term.variable) end for term in f.quadratic_terms MOI.throw_if_not_valid(model, term.variable_1) MOI.throw_if_not_valid(model, term.variable_2) end return end function MOI.add_constraint( model::Optimizer, func::F, set::S, ) where { F<:Union{ MOI.ScalarAffineFunction{Float64}, MOI.ScalarQuadraticFunction{Float64}, }, S<:Union{MOI.LessThan{Float64},MOI.EqualTo{Float64}}, } _check_inbounds(model, func) constraints = _constraints(model, F, S) push!(constraints, _ConstraintInfo(func, set)) return MOI.ConstraintIndex{F,S}(length(constraints)) end # MOI.VariablePrimalStart function MOI.supports( ::Optimizer, ::MOI.VariablePrimalStart, ::Type{MOI.VariableIndex}, ) return true end function MOI.set( model::Optimizer, ::MOI.VariablePrimalStart, vi::MOI.VariableIndex, value::Union{Real,Nothing}, ) MOI.throw_if_not_valid(model, vi) model.starting_values[vi.value] = value return end function MOI.get( model::Optimizer, ::MOI.VariablePrimalStart, vi::MOI.VariableIndex, ) MOI.throw_if_not_valid(model, vi) return model.starting_values[vi.value] end # MOI.NLPBlock MOI.supports(::Optimizer, ::MOI.NLPBlock) = true function MOI.set(model::Optimizer, ::MOI.NLPBlock, nlp_data::MOI.NLPBlockData) if model.nlp_model !== nothing error("Cannot mix the new and legacy nonlinear APIs") end model.nlp_data = nlp_data return end # MOI.ObjectiveFunction function MOI.supports( ::Optimizer, ::MOI.ObjectiveFunction{ <:Union{ MOI.VariableIndex, MOI.ScalarAffineFunction{Float64}, MOI.ScalarQuadraticFunction{Float64}, }, }, ) return true end function MOI.get(model::Optimizer, ::MOI.ObjectiveFunctionType) if model.nlp_model !== nothing && model.nlp_model.objective !== nothing return MOI.ScalarNonlinearFunction end return typeof(model.objective) end function MOI.get(model::Optimizer, ::MOI.ObjectiveFunction{F}) where {F} return convert(F, model.objective)::F end function MOI.set( model::Optimizer, ::MOI.ObjectiveFunction{F}, func::F, ) where { F<:Union{ MOI.VariableIndex, MOI.ScalarAffineFunction{Float64}, MOI.ScalarQuadraticFunction{Float64}, }, } _check_inbounds(model, func) model.objective = func if model.nlp_model !== nothing MOI.Nonlinear.set_objective(model.nlp_model, nothing) end return end # ScalarNonlinearFunction function _init_nlp_model(model) if model.nlp_model === nothing if !(model.nlp_data.evaluator isa _EmptyNLPEvaluator) error("Cannot mix the new and legacy nonlinear APIs") end model.nlp_model = MOI.Nonlinear.Model() end return end function MOI.is_valid( model::Optimizer, ci::MOI.ConstraintIndex{MOI.ScalarNonlinearFunction,S}, ) where { S<:Union{ MOI.EqualTo{Float64}, MOI.LessThan{Float64}, MOI.GreaterThan{Float64}, MOI.Interval{Float64}, }, } if model.nlp_model === nothing return false end index = MOI.Nonlinear.ConstraintIndex(ci.value) return MOI.is_valid(model.nlp_model, index) end function MOI.supports_constraint( ::Optimizer, ::Type{MOI.ScalarNonlinearFunction}, ::Type{S}, ) where { S<:Union{ MOI.EqualTo{Float64}, MOI.LessThan{Float64}, MOI.GreaterThan{Float64}, MOI.Interval{Float64}, }, } return true end function MOI.add_constraint( model::Optimizer, f::MOI.ScalarNonlinearFunction, set::Union{ MOI.EqualTo{Float64}, MOI.LessThan{Float64}, MOI.GreaterThan{Float64}, MOI.Interval{Float64}, }, ) _init_nlp_model(model) index = MOI.Nonlinear.add_constraint(model.nlp_model, f, set) return MOI.ConstraintIndex{typeof(f),typeof(set)}(index.value) end function MOI.supports( ::Optimizer, ::MOI.ObjectiveFunction{MOI.ScalarNonlinearFunction}, ) return true end function MOI.set( model::Optimizer, attr::MOI.ObjectiveFunction{MOI.ScalarNonlinearFunction}, func::MOI.ScalarNonlinearFunction, ) _init_nlp_model(model) MOI.Nonlinear.set_objective(model.nlp_model, func) return end ### MOI.AutomaticDifferentiationBackend MOI.supports(::Optimizer, ::MOI.AutomaticDifferentiationBackend) = true function MOI.get(model::Optimizer, ::MOI.AutomaticDifferentiationBackend) return model.ad_backend end function MOI.set( model::Optimizer, ::MOI.AutomaticDifferentiationBackend, backend::MOI.Nonlinear.AbstractAutomaticDifferentiation, ) model.ad_backend = backend return end # optimize! function _fill_gradient(grad, x, f::MOI.VariableIndex) grad[f.value] = 1.0 return end function _fill_gradient(grad, x, f::MOI.ScalarAffineFunction{Float64}) for term in f.terms grad[term.variable.value] += term.coefficient end return end function _fill_gradient(grad, x, f::MOI.ScalarQuadraticFunction{Float64}) for term in f.affine_terms grad[term.variable.value] += term.coefficient end for term in f.quadratic_terms i, j = term.variable_1.value, term.variable_2.value grad[i] += term.coefficient * x[j] if i != j grad[j] += term.coefficient * x[i] end end return end function _fill_result(result::Vector, x, offset, constraints::Vector) for (i, constraint) in enumerate(constraints) lhs = MOI.Utilities.eval_variables(vi -> x[vi.value], constraint.func) result[offset+i] = lhs - MOI.constant(constraint.set) end return end function _fill_jacobian(jac, x, offset, term::MOI.ScalarAffineTerm) jac[term.variable.value, offset] += term.coefficient return end function _fill_jacobian(jac, x, offset, term::MOI.ScalarQuadraticTerm) i, j = term.variable_1.value, term.variable_2.value jac[i, offset] += term.coefficient * x[j] if i != j jac[j, offset] += term.coefficient * x[i] end return end function _fill_jacobian(jac, x, offset, f::MOI.ScalarAffineFunction) for term in f.terms _fill_jacobian(jac, x, offset, term) end return end function _fill_jacobian(jac, x, offset, f::MOI.ScalarQuadraticFunction) for term in f.affine_terms _fill_jacobian(jac, x, offset, term) end for q_term in f.quadratic_terms _fill_jacobian(jac, x, offset, q_term) end return end function _fill_jacobian(jac, x, offset, constraints::Vector) for (i, constraint) in enumerate(constraints) _fill_jacobian(jac, x, offset + i, constraint.func) end return end function objective_fn(model::Optimizer, x::Vector, grad::Vector) # The order of the conditions is important. NLP objectives override regular # objectives. if length(grad) > 0 fill!(grad, 0.0) if model.sense == MOI.FEASIBILITY_SENSE # nothing elseif model.nlp_data.has_objective MOI.eval_objective_gradient(model.nlp_data.evaluator, grad, x) elseif model.objective !== nothing _fill_gradient(grad, x, model.objective) end end if model.sense == MOI.FEASIBILITY_SENSE return 0.0 elseif model.nlp_data.has_objective return MOI.eval_objective(model.nlp_data.evaluator, x) elseif model.objective !== nothing return MOI.Utilities.eval_variables(vi -> x[vi.value], model.objective) end # No ObjectiveFunction is set, but ObjectiveSense is? return 0.0 end function _initialize_options!(model::Optimizer) local_optimizer = model.options["local_optimizer"] if local_optimizer !== nothing num_variables = length(model.starting_values) local_optimizer = if local_optimizer isa Symbol NLopt.Opt(local_optimizer, num_variables) else @assert local_optimizer isa NLopt.Opt NLopt.Opt(local_optimizer.algorithm, num_variables) end NLopt.local_optimizer!(model.inner, local_optimizer) end NLopt.stopval!(model.inner, model.options["stopval"]) if !isnan(model.options["ftol_rel"]) NLopt.ftol_rel!(model.inner, model.options["ftol_rel"]) end if !isnan(model.options["ftol_abs"]) NLopt.ftol_abs!(model.inner, model.options["ftol_abs"]) end if !isnan(model.options["xtol_rel"]) NLopt.xtol_rel!(model.inner, model.options["xtol_rel"]) end if model.options["xtol_abs"] != nothing NLopt.xtol_abs!(model.inner, model.options["xtol_abs"]) end NLopt.maxeval!(model.inner, model.options["maxeval"]) NLopt.maxtime!(model.inner, model.options["maxtime"]) if model.options["initial_step"] != nothing NLopt.initial_step!(model.inner, model.options["initial_step"]) end NLopt.population!(model.inner, model.options["population"]) if model.options["seed"] isa Integer NLopt.srand(model.options["seed"]) end NLopt.vector_storage!(model.inner, model.options["vector_storage"]) return end function MOI.optimize!(model::Optimizer) num_variables = length(model.starting_values) model.inner = NLopt.Opt(model.options["algorithm"], num_variables) _initialize_options!(model) if model.nlp_model !== nothing vars = MOI.VariableIndex.(1:num_variables) model.nlp_data = MOI.NLPBlockData( MOI.Nonlinear.Evaluator(model.nlp_model, model.ad_backend, vars), ) end NLopt.lower_bounds!(model.inner, model.variables.lower) NLopt.upper_bounds!(model.inner, model.variables.upper) nonlinear_equality_indices = findall( bound -> bound.lower == bound.upper, model.nlp_data.constraint_bounds, ) nonlinear_inequality_indices = findall( bound -> bound.lower != bound.upper, model.nlp_data.constraint_bounds, ) num_nlpblock_constraints = length(model.nlp_data.constraint_bounds) # map from eqidx/ineqidx to index in equalities/inequalities constrmap = zeros(Int, num_nlpblock_constraints) for (i, k) in enumerate(nonlinear_equality_indices) constrmap[k] = i end num_nlpblock_inequalities = 0 for (i, k) in enumerate(nonlinear_inequality_indices) num_nlpblock_inequalities += 1 constrmap[k] = num_nlpblock_inequalities bounds = model.nlp_data.constraint_bounds[k] if !isinf(bounds.lower) && !isinf(bounds.upper) # constraint has bounds on both sides, keep room for it num_nlpblock_inequalities += 1 end end if string(model.options["algorithm"])[2] == 'N' # Derivative free optimizer chosen MOI.initialize(model.nlp_data.evaluator, Symbol[]) elseif num_nlpblock_constraints > 0 MOI.initialize(model.nlp_data.evaluator, [:Grad, :Jac]) else MOI.initialize(model.nlp_data.evaluator, [:Grad]) end if model.sense == MOI.MAX_SENSE NLopt.max_objective!(model.inner, (x, g) -> objective_fn(model, x, g)) else NLopt.min_objective!(model.inner, (x, g) -> objective_fn(model, x, g)) end Jac_IJ = Tuple{Int,Int}[] if num_nlpblock_constraints > 0 append!(Jac_IJ, MOI.jacobian_structure(model.nlp_data.evaluator)) end Jac_val = zeros(length(Jac_IJ)) g_vec = zeros(num_nlpblock_constraints) function equality_constraint_fn(result::Vector, x::Vector, jac::Matrix) if length(jac) > 0 fill!(jac, 0.0) MOI.eval_constraint_jacobian(model.nlp_data.evaluator, Jac_val, x) for ((row, col), val) in zip(Jac_IJ, Jac_val) bounds = model.nlp_data.constraint_bounds[row] if bounds.lower == bounds.upper jac[col, constrmap[row]] += val end end offset = length(nonlinear_equality_indices) _fill_jacobian(jac, x, offset, model.linear_eq_constraints) offset += length(model.linear_eq_constraints) _fill_jacobian(jac, x, offset, model.quadratic_eq_constraints) end MOI.eval_constraint(model.nlp_data.evaluator, g_vec, x) for (i, index) in enumerate(nonlinear_equality_indices) bounds = model.nlp_data.constraint_bounds[index] result[i] = g_vec[index] - bounds.upper end offset = length(nonlinear_equality_indices) _fill_result(result, x, offset, model.linear_eq_constraints) offset += length(model.linear_eq_constraints) _fill_result(result, x, offset, model.quadratic_eq_constraints) return end num_equality_constraints = length(nonlinear_equality_indices) + length(model.linear_eq_constraints) + length(model.quadratic_eq_constraints) if num_equality_constraints > 0 NLopt.equality_constraint!( model.inner, num_equality_constraints, equality_constraint_fn, model.options["constrtol_abs"], ) end # inequalities need to be massaged a bit # f(x) <= u => f(x) - u <= 0 # f(x) >= l => l - f(x) <= 0 function inequality_constraint_fn(result::Vector, x::Vector, jac::Matrix) if length(jac) > 0 fill!(jac, 0.0) MOI.eval_constraint_jacobian(model.nlp_data.evaluator, Jac_val, x) for ((row, col), val) in zip(Jac_IJ, Jac_val) bounds = model.nlp_data.constraint_bounds[row] if bounds.lower == bounds.upper continue # This is an equality constraint elseif isinf(bounds.lower) # upper bound jac[col, constrmap[row]] += val elseif isinf(bounds.upper) # lower bound jac[col, constrmap[row]] -= val else # boxed jac[col, constrmap[row]] += val jac[col, constrmap[row]+1] -= val end end offset = num_nlpblock_inequalities _fill_jacobian(jac, x, offset, model.linear_le_constraints) offset += length(model.linear_le_constraints) _fill_jacobian(jac, x, offset, model.quadratic_le_constraints) end # Fill in the result. The first entries are from NLPBlock, and the value # of g(x) is placed in g_vec. MOI.eval_constraint(model.nlp_data.evaluator, g_vec, x) for row in 1:num_nlpblock_constraints index = constrmap[row] bounds = model.nlp_data.constraint_bounds[row] if bounds.lower == bounds.upper continue # This is an equality constraint elseif isinf(bounds.lower) # g(x) <= u --> g(x) - u <= 0 result[index] = g_vec[row] - bounds.upper elseif isinf(bounds.upper) # g(x) >= l --> l - g(x) <= 0 result[index] = bounds.lower - g_vec[row] else # l <= g(x) <= u result[index] = g_vec[row] - bounds.upper result[index+1] = bounds.lower - g_vec[row] end end offset = num_nlpblock_inequalities _fill_result(result, x, offset, model.linear_le_constraints) offset += length(model.linear_le_constraints) _fill_result(result, x, offset, model.quadratic_le_constraints) return end num_inequality_constraints = num_nlpblock_inequalities + length(model.linear_le_constraints) + length(model.quadratic_le_constraints) if num_inequality_constraints > 0 NLopt.inequality_constraint!( model.inner, num_inequality_constraints, inequality_constraint_fn, model.options["constrtol_abs"], ) end # Set MOI.VariablePrimalStart, clamping to bound nearest 0 if not given. model.solution = something.( model.starting_values, clamp.(0.0, model.variables.lower, model.variables.upper), ) start_time = time() model.objective_value, _, model.status = NLopt.optimize!(model.inner, model.solution) model.solve_time = time() - start_time return end const _STATUS_MAP = Dict( :NOT_CALLED => (MOI.OPTIMIZE_NOT_CALLED, MOI.NO_SOLUTION), # The order here matches the nlopt_result enum :FAILURE => (MOI.OTHER_ERROR, MOI.UNKNOWN_RESULT_STATUS), :INVALID_ARGS => (MOI.INVALID_OPTION, MOI.UNKNOWN_RESULT_STATUS), :OUT_OF_MEMORY => (MOI.MEMORY_LIMIT, MOI.UNKNOWN_RESULT_STATUS), :ROUNDOFF_LIMITED => (MOI.ALMOST_LOCALLY_SOLVED, MOI.NEARLY_FEASIBLE_POINT), :FORCED_STOP => (MOI.OTHER_ERROR, MOI.UNKNOWN_RESULT_STATUS), :SUCCESS => (MOI.LOCALLY_SOLVED, MOI.FEASIBLE_POINT), :STOPVAL_REACHED => (MOI.OBJECTIVE_LIMIT, MOI.UNKNOWN_RESULT_STATUS), :FTOL_REACHED => (MOI.LOCALLY_SOLVED, MOI.FEASIBLE_POINT), :XTOL_REACHED => (MOI.LOCALLY_SOLVED, MOI.FEASIBLE_POINT), :MAXEVAL_REACHED => (MOI.ITERATION_LIMIT, MOI.UNKNOWN_RESULT_STATUS), :MAXTIME_REACHED => (MOI.TIME_LIMIT, MOI.UNKNOWN_RESULT_STATUS), ) function MOI.get(model::Optimizer, ::MOI.TerminationStatus) return _STATUS_MAP[model.status][1] end MOI.get(model::Optimizer, ::MOI.RawStatusString) = string(model.status) function MOI.get(model::Optimizer, ::MOI.ResultCount) return model.status == :NOT_CALLED ? 0 : 1 end function MOI.get(model::Optimizer, attr::MOI.PrimalStatus) if !(1 <= attr.result_index <= MOI.get(model, MOI.ResultCount())) return MOI.NO_SOLUTION end return _STATUS_MAP[model.status][2] end MOI.get(::Optimizer, ::MOI.DualStatus) = MOI.NO_SOLUTION MOI.get(model::Optimizer, ::MOI.SolveTimeSec) = model.solve_time function MOI.get(model::Optimizer, attr::MOI.ObjectiveValue) MOI.check_result_index_bounds(model, attr) return model.objective_value end function MOI.get( model::Optimizer, attr::MOI.VariablePrimal, vi::MOI.VariableIndex, ) MOI.check_result_index_bounds(model, attr) MOI.throw_if_not_valid(model, vi) return model.solution[vi.value] end function MOI.get( model::Optimizer, attr::MOI.ConstraintPrimal, ci::MOI.ConstraintIndex, ) MOI.check_result_index_bounds(model, attr) MOI.throw_if_not_valid(model, ci) return MOI.Utilities.get_fallback(model, attr, ci) end end # module
NLopt
https://github.com/jump-dev/NLopt.jl.git
[ "MIT" ]
1.1.1
81a321298aed95631447a1f3afc2ea83682d44a4
code
949
# Copyright (c) 2019 Mathieu BesanΓ§on, Oscar Dowson, and contributors # # Use of this source code is governed by an MIT-style license that can be found # in the LICENSE.md file or at https://opensource.org/licenses/MIT. using Clang.Generators import NLopt_jll c_api = joinpath(NLopt_jll.artifact_dir, "include", "nlopt.h") build!( create_context( [c_api], get_default_args(), load_options(joinpath(@__DIR__, "generate.toml")), ), ) header = """ # Copyright (c) 2013: Steven G. Johnson and contributors # # Use of this source code is governed by an MIT-style license that can be found # in the LICENSE.md file or at https://opensource.org/licenses/MIT. #! format: off """ filename = joinpath(@__DIR__, "..", "src", "libnlopt.jl") contents = read(filename, String) contents = replace( contents, "const nlopt_opt = Ptr{nlopt_opt_s}" => "const nlopt_opt = Ptr{Cvoid}", ) write(filename, header * contents)
NLopt
https://github.com/jump-dev/NLopt.jl.git
[ "MIT" ]
1.1.1
81a321298aed95631447a1f3afc2ea83682d44a4
code
22219
# Copyright (c) 2013: Steven G. Johnson and contributors # # Use of this source code is governed by an MIT-style license that can be found # in the LICENSE.md file or at https://opensource.org/licenses/MIT. module NLopt using CEnum: @cenum using NLopt_jll: libnlopt include("libnlopt.jl") ############################################################################ # Mirrors of NLopt's C enum constants: @enum Algorithm::Cint begin GN_DIRECT = 0 GN_DIRECT_L = 1 GN_DIRECT_L_RAND = 2 GN_DIRECT_NOSCAL = 3 GN_DIRECT_L_NOSCAL = 4 GN_DIRECT_L_RAND_NOSCAL = 5 GN_ORIG_DIRECT = 6 GN_ORIG_DIRECT_L = 7 GD_STOGO = 8 GD_STOGO_RAND = 9 LD_LBFGS_NOCEDAL = 10 LD_LBFGS = 11 LN_PRAXIS = 12 LD_VAR1 = 13 LD_VAR2 = 14 LD_TNEWTON = 15 LD_TNEWTON_RESTART = 16 LD_TNEWTON_PRECOND = 17 LD_TNEWTON_PRECOND_RESTART = 18 GN_CRS2_LM = 19 GN_MLSL = 20 GD_MLSL = 21 GN_MLSL_LDS = 22 GD_MLSL_LDS = 23 LD_MMA = 24 LN_COBYLA = 25 LN_NEWUOA = 26 LN_NEWUOA_BOUND = 27 LN_NELDERMEAD = 28 LN_SBPLX = 29 LN_AUGLAG = 30 LD_AUGLAG = 31 LN_AUGLAG_EQ = 32 LD_AUGLAG_EQ = 33 LN_BOBYQA = 34 GN_ISRES = 35 AUGLAG = 36 AUGLAG_EQ = 37 G_MLSL = 38 G_MLSL_LDS = 39 LD_SLSQP = 40 LD_CCSAQ = 41 GN_ESCH = 42 GN_AGS = 43 end Base.convert(::Type{nlopt_algorithm}, a::Algorithm) = nlopt_algorithm(Int(a)) Base.convert(::Type{Algorithm}, r::nlopt_algorithm) = Algorithm(Int(r)) function Algorithm(name::Symbol)::Algorithm algorithm = nlopt_algorithm_from_string("$name") if UInt32(algorithm) == 0xffffffff throw(ArgumentError("unknown algorithm: $name")) end return algorithm end # enum nlopt_result @enum Result::Cint begin FORCED_STOP = -5 ROUNDOFF_LIMITED = -4 OUT_OF_MEMORY = -3 INVALID_ARGS = -2 FAILURE = -1 SUCCESS = 1 STOPVAL_REACHED = 2 FTOL_REACHED = 3 XTOL_REACHED = 4 MAXEVAL_REACHED = 5 MAXTIME_REACHED = 6 end Base.convert(::Type{nlopt_result}, r::Result) = nlopt_result(Int(r)) Base.convert(::Type{Result}, r::nlopt_result) = Result(Int(r)) # so that result < 0 checks continue to work Base.isless(x::Integer, r::Result) = isless(x, Cint(r)) Base.isless(r::Result, x::Integer) = isless(Cint(r), x) # so that == :Foo checks continue to work Base.:(==)(s::Symbol, r::Result) = s == Symbol(r) Base.:(==)(r::Result, s::Symbol) = s == r ############################################################################ # wrapper around nlopt_opt type # pass both f and o to the callback so that we can handle exceptions mutable struct Callback_Data f::Function o::Any # should be Opt, but see Julia issue #269 end function Base.unsafe_convert(::Type{Ptr{Cvoid}}, c::Callback_Data) return pointer_from_objref(c) end mutable struct Opt opt::Ptr{Cvoid} # need to store callback data for objective and constraints in # Opt so that they aren't garbage-collected. cb[1] is the objective. cb::Vector{Callback_Data} exception::Any function Opt(p::Ptr{Cvoid}) @assert p != C_NULL opt = new(p, Array{Callback_Data}(undef, 1), nothing) finalizer(destroy, opt) return opt end end function Opt(algorithm::Algorithm, n::Integer) if n < 0 throw(ArgumentError("invalid dimension $n < 0")) end p = nlopt_create(algorithm, n) return Opt(p) end function Opt(algorithm::Union{Integer,Symbol}, n::Integer) return Opt(Algorithm(algorithm), n) end Base.unsafe_convert(::Type{Ptr{Cvoid}}, o::Opt) = getfield(o, :opt) destroy(o::Opt) = nlopt_destroy(o) Base.ndims(o::Opt)::Int = nlopt_get_dimension(o) algorithm(o::Opt)::Algorithm = nlopt_get_algorithm(o) Base.show(io::IO, o::Opt) = print(io, "Opt($(algorithm(o)), $(ndims(o)))") ############################################################################ # copying is a little tricky because we have to tell NLopt to use new # Callback_Data. function munge_callback(p::Ptr{Cvoid}, p_user_data::Ptr{Cvoid}) old_to_new_pointer_map = unsafe_pointer_to_objref(p_user_data)::Dict{Ptr{Cvoid},Ptr{Cvoid}} return old_to_new_pointer_map[p] end function Base.copy(opt::Opt) p = nlopt_copy(opt) if p == C_NULL error("Error in nlopt_copy") end new_opt = Opt(p) opt_callbacks = getfield(opt, :cb) new_callbacks = Vector{Callback_Data}(undef, length(opt_callbacks)) setfield!(new_opt, :cb, new_callbacks) old_to_new_pointer_map = Dict{Ptr{Cvoid},Ptr{Cvoid}}(C_NULL => C_NULL) for i in 1:length(opt_callbacks) if isassigned(opt_callbacks, i) new_callbacks[i] = Callback_Data(opt_callbacks[i].f, new_opt) old_to_new_pointer_map[pointer_from_objref(opt_callbacks[i])] = pointer_from_objref(new_callbacks[i]) end end # nlopt_munge_data is a routine that allows us to convert all pointers to # existing Callback_Data objects into pointers for the corresponding object # in new_callbacks. c_fn = @cfunction(munge_callback, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid})) GC.@preserve old_to_new_pointer_map begin p_old_to_new_pointer_map = pointer_from_objref(old_to_new_pointer_map) nlopt_munge_data(new_opt, c_fn, p_old_to_new_pointer_map) end return new_opt end ############################################################################ # converting error results into exceptions struct ForcedStop <: Exception end function errmsg(o::Opt) msg = nlopt_get_errmsg(o) return msg == C_NULL ? nothing : unsafe_string(msg) end function _errmsg(o::Opt) s = errmsg(o) return s === nothing || isempty(s) ? "" : ": " * s end # check result and throw an exception if necessary chk(o::Opt, result::nlopt_result) = chk(o, convert(Result, result)) function chk(o::Opt, result::Result) if result >= 0 return elseif result == ROUNDOFF_LIMITED return elseif result == INVALID_ARGS throw(ArgumentError("invalid NLopt arguments" * _errmsg(o))) elseif result == OUT_OF_MEMORY throw(OutOfMemoryError()) else error("nlopt failure $result", _errmsg(o)) end end ############################################################################ # getting and setting scalar and vector parameters stopval(o::Opt) = nlopt_get_stopval(o) stopval!(o::Opt, val::Real) = chk(o, nlopt_set_stopval(o, val)) ftol_rel(o::Opt) = nlopt_get_ftol_rel(o) ftol_rel!(o::Opt, val::Real) = chk(o, nlopt_set_ftol_rel(o, val)) ftol_abs(o::Opt) = nlopt_get_ftol_abs(o) ftol_abs!(o::Opt, val::Real) = chk(o, nlopt_set_ftol_abs(o, val)) xtol_rel(o::Opt) = nlopt_get_xtol_rel(o) xtol_rel!(o::Opt, val::Real) = chk(o, nlopt_set_xtol_rel(o, val)) maxeval(o::Opt) = nlopt_get_maxeval(o) maxeval!(o::Opt, val::Integer) = chk(o, nlopt_set_maxeval(o, val)) maxtime(o::Opt) = nlopt_get_maxtime(o) maxtime!(o::Opt, val::Real) = chk(o, nlopt_set_maxtime(o, val)) force_stop(o::Opt) = nlopt_get_force_stop(o) force_stop!(o::Opt, val::Integer) = chk(o, nlopt_set_force_stop(o, val)) force_stop!(o::Opt) = force_stop!(o, 1) population(o::Opt) = nlopt_get_population(o) population!(o::Opt, val::Integer) = chk(o, nlopt_set_population(o, val)) vector_storage(o::Opt) = nlopt_get_vector_storage(o) vector_storage!(o::Opt, val::Integer) = chk(o, nlopt_set_vector_storage(o, val)) ############################################################################ # Optimizer parameters function lower_bounds( o::Opt, v::Vector{Cdouble} = Array{Cdouble}(undef, ndims(o)), ) if length(v) != ndims(o) throw(BoundsError()) end chk(o, nlopt_get_lower_bounds(o, v)) return v end function lower_bounds!(o::Opt, v::Vector{Cdouble}) if length(v) != ndims(o) throw(BoundsError()) end return chk(o, nlopt_set_lower_bounds(o, v)) end function lower_bounds!(o::Opt, v::AbstractVector{<:Real}) return lower_bounds!(o, Array{Cdouble}(v)) end lower_bounds!(o::Opt, val::Real) = chk(o, nlopt_set_lower_bounds1(o, val)) function upper_bounds( o::Opt, v::Vector{Cdouble} = Array{Cdouble}(undef, ndims(o)), ) if length(v) != ndims(o) throw(BoundsError()) end chk(o, nlopt_get_upper_bounds(o, v)) return v end function upper_bounds!(o::Opt, v::Vector{Cdouble}) if length(v) != ndims(o) throw(BoundsError()) end return chk(o, nlopt_set_upper_bounds(o, v)) end function upper_bounds!(o::Opt, v::AbstractVector{<:Real}) return upper_bounds!(o, Array{Cdouble}(v)) end upper_bounds!(o::Opt, val::Real) = chk(o, nlopt_set_upper_bounds1(o, val)) function xtol_abs(o::Opt, v::Vector{Cdouble} = Array{Cdouble}(undef, ndims(o))) if length(v) != ndims(o) throw(BoundsError()) end chk(o, nlopt_get_xtol_abs(o, v)) return v end function xtol_abs!(o::Opt, v::Vector{Cdouble}) if length(v) != ndims(o) throw(BoundsError()) end return chk(o, nlopt_set_xtol_abs(o, v)) end function xtol_abs!(o::Opt, v::AbstractVector{<:Real}) return xtol_abs!(o, Array{Cdouble}(v)) end xtol_abs!(o::Opt, val::Real) = chk(o, nlopt_set_xtol_abs1(o, val)) function local_optimizer!(o::Opt, lo::Opt) return chk(o, nlopt_set_local_optimizer(o, lo)) end function default_initial_step!(o::Opt, x::Vector{Cdouble}) if length(x) != ndims(o) throw(BoundsError()) end return chk(o, nlopt_set_default_initial_step(o, x)) end function default_initial_step!(o::Opt, x::AbstractVector{<:Real}) return default_initial_step!(o, Array{Cdouble}(x)) end function initial_step!(o::Opt, dx::Vector{Cdouble}) if length(dx) != ndims(o) throw(BoundsError()) end return chk(o, nlopt_set_initial_step(o, dx)) end function initial_step!(o::Opt, dx::AbstractVector{<:Real}) return initial_step!(o, Array{Cdouble}(dx)) end function initial_step!(o::Opt, dx::Real) return chk(o, nlopt_set_initial_step1(o, dx)) end function initial_step(o::Opt, x::Vector{Cdouble}, dx::Vector{Cdouble}) if length(x) != ndims(o) || length(dx) != ndims(o) throw(BoundsError()) end chk(o, nlopt_get_initial_step(o, x, dx)) return dx end function initial_step(o::Opt, x::AbstractVector{<:Real}) return initial_step(o, Array{Cdouble}(x), Array{Cdouble}(undef, ndims(o))) end ############################################################################ function algorithm_name(a::Algorithm) p = nlopt_algorithm_name(a) # pointer cannot be C_NULL because we are using only valid Enums @assert p !== C_NULL return unsafe_string(p) end algorithm_name(a::Union{Integer,Symbol}) = algorithm_name(Algorithm(a)) algorithm_name(o::Opt) = algorithm_name(algorithm(o)) function Base.show(io::IO, ::MIME"text/plain", a::Algorithm) show(io, a) return print(io, ": ", algorithm_name(a)) end numevals(o::Opt) = nlopt_get_numevals(o) ############################################################################ function version() major, minor, patch = Ref{Cint}(), Ref{Cint}(), Ref{Cint}() nlopt_version(major, minor, patch) return VersionNumber(major[], minor[], patch[]) end const NLOPT_VERSION = version() ############################################################################ srand(seed::Integer) = nlopt_srand(seed) srand_time() = nlopt_srand_time() ############################################################################ # Objective function: function nlopt_callback_wrapper( n::Cuint, p_x::Ptr{Cdouble}, p_grad::Ptr{Cdouble}, d_::Ptr{Cvoid}, )::Cdouble d = unsafe_pointer_to_objref(d_)::Callback_Data x = unsafe_wrap(Array, p_x, (n,)) grad = unsafe_wrap(Array, p_grad, (n,)) try return d.f(x, p_grad == C_NULL ? Cdouble[] : grad) catch e _catch_forced_stop(d.o, e) end return NaN end function min_objective!(o::Opt, f::Function) cb = Callback_Data(f, o) getfield(o, :cb)[1] = cb c_fn = @cfunction( nlopt_callback_wrapper, Cdouble, (Cuint, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cvoid}) ) return chk(o, nlopt_set_min_objective(o, c_fn, cb)) end function max_objective!(o::Opt, f::Function) cb = Callback_Data(f, o) getfield(o, :cb)[1] = cb c_fn = @cfunction( nlopt_callback_wrapper, Cdouble, (Cuint, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cvoid}) ) return chk(o, nlopt_set_max_objective(o, c_fn, cb)) end ############################################################################ # Nonlinear constraints: function inequality_constraint!(o::Opt, f::Function, tol::Real = 0.0) cb = Callback_Data(f, o) push!(getfield(o, :cb), cb) c_fn = @cfunction( nlopt_callback_wrapper, Cdouble, (Cuint, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cvoid}) ) return chk(o, nlopt_add_inequality_constraint(o, c_fn, cb, tol)) end function equality_constraint!(o::Opt, f::Function, tol::Real = 0.0) cb = Callback_Data(f, o) push!(getfield(o, :cb), cb) c_fn = @cfunction( nlopt_callback_wrapper, Cdouble, (Cuint, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cvoid}) ) return chk(o, nlopt_add_equality_constraint(o, c_fn, cb, tol)) end function remove_constraints!(o::Opt) resize!(getfield(o, :cb), 1) chk(o, nlopt_remove_inequality_constraints(o)) chk(o, nlopt_remove_equality_constraints(o)) return end ############################################################################ # Vector-valued constraints function nlopt_vcallback_wrapper( m::Cuint, p_res::Ptr{Cdouble}, n::Cuint, p_x::Ptr{Cdouble}, p_grad::Ptr{Cdouble}, d_::Ptr{Cvoid}, ) d = unsafe_pointer_to_objref(d_)::Callback_Data res = unsafe_wrap(Array, p_res, (m,)) x = unsafe_wrap(Array, p_x, (n,)) grad = p_grad == C_NULL ? zeros(Cdouble, 0, 0) : unsafe_wrap(Array, p_grad, (n, m)) try d.f(res, x, grad) catch e _catch_forced_stop(d.o, e) end return end function _catch_forced_stop(o::Opt, e) if e isa ForcedStop setfield!(o, :exception, e) elseif e isa InterruptException setfield!(o, :exception, ForcedStop()) else setfield!(o, :exception, CapturedException(e, catch_backtrace())) end force_stop!(o) return end function inequality_constraint!(o::Opt, f::Function, tol::Vector{Cdouble}) cb = Callback_Data(f, o) push!(getfield(o, :cb), cb) c_fn = @cfunction( nlopt_vcallback_wrapper, Cvoid, (Cuint, Ptr{Cdouble}, Cuint, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cvoid}), ) ret = nlopt_add_inequality_mconstraint(o, length(tol), c_fn, cb, tol) return chk(o, ret) end function inequality_constraint!( o::Opt, f::Function, tol::AbstractVector{<:Real}, ) return inequality_constraint!(o, f, Array{Float64}(tol)) end function inequality_constraint!( o::Opt, m::Integer, f::Function, tol::Real = 0.0, ) return inequality_constraint!(o, f, fill(Cdouble(tol), m)) end function equality_constraint!(o::Opt, f::Function, tol::Vector{Cdouble}) cb = Callback_Data(f, o) push!(getfield(o, :cb), cb) c_fn = @cfunction( nlopt_vcallback_wrapper, Cvoid, (Cuint, Ptr{Cdouble}, Cuint, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cvoid}), ) return chk(o, nlopt_add_equality_mconstraint(o, length(tol), c_fn, cb, tol)) end function equality_constraint!(o::Opt, f::Function, tol::AbstractVector{<:Real}) return equality_constraint!(o, f, Array{Float64}(tol)) end function equality_constraint!(o::Opt, m::Integer, f::Function, tol::Real = 0.0) return equality_constraint!(o, f, fill(Cdouble(tol), m)) end ############################################################################ # Dict-like API for generic algorithm properties """ OptParams <: AbstractDict{String, Float64} Dictionary-like structure for accessing algorithm-specific parameters for an NLopt optimization object `opt`, returned by `opt.params`. Use this object to both set and view these string-keyed numeric parameters. """ struct OptParams <: AbstractDict{String,Float64} o::Opt end Base.length(p::OptParams)::Int = nlopt_num_params(p.o) Base.haskey(p::OptParams, s::AbstractString)::Bool = nlopt_has_param(p.o, s) function Base.get(p::OptParams, s::AbstractString, default::Float64) return nlopt_get_param(p.o, s, default) end function Base.get(p::OptParams, s::AbstractString, default) if !haskey(p, s) return default end return nlopt_get_param(p.o, s, NaN) end function Base.setindex!(p::OptParams, v::Real, s::AbstractString) ret = nlopt_set_param(p.o, s, v) return chk(p.o, ret) end function Base.setindex!(p::OptParams, v::Algorithm, s::AbstractString) return setindex!(p, Int(v), s) end function Base.iterate(p::OptParams, state = 0) if state >= length(p) return nothing end name_ptr = nlopt_nth_param(p.o, state) @assert name_ptr != C_NULL name = unsafe_string(name_ptr) return name => p[name], state + 1 end ############################################################################ # property-based getters setters opt.foo for Julia 0.7 # … at some point we will deprecate the old interface. function Base.getproperty(o::Opt, p::Symbol) if p === :lower_bounds return lower_bounds(o) elseif p === :upper_bounds return upper_bounds(o) elseif p === :stopval return stopval(o) elseif p === :ftol_rel return ftol_rel(o) elseif p === :ftol_abs return ftol_abs(o) elseif p === :xtol_rel return xtol_rel(o) elseif p === :xtol_abs return xtol_abs(o) elseif p === :maxeval return maxeval(o) elseif p === :maxtime return maxtime(o) elseif p === :force_stop return force_stop(o) elseif p === :population return population(o) elseif p === :vector_storage return vector_storage(o) elseif p === :initial_step error( "Getting `initial_step` is unsupported. Use " * "`initial_step(opt, x)` to access the initial step at a point `x`.", ) elseif p === :algorithm return algorithm(o) elseif p === :numevals return numevals(o) elseif p === :errmsg return errmsg(o) elseif p === :params return OptParams(o) else error("type Opt has no readable property $p") end end function Base.setproperty!(o::Opt, p::Symbol, x) if p === :lower_bounds lower_bounds!(o, x) elseif p === :upper_bounds upper_bounds!(o, x) elseif p === :stopval stopval!(o, x) elseif p === :ftol_rel ftol_rel!(o, x) elseif p === :ftol_abs ftol_abs!(o, x) elseif p === :xtol_rel xtol_rel!(o, x) elseif p === :xtol_abs xtol_abs!(o, x) elseif p === :maxeval maxeval!(o, x) elseif p === :maxtime maxtime!(o, x) elseif p === :force_stop force_stop!(o, x) elseif p === :population population!(o, x) elseif p === :vector_storage vector_storage!(o, x) elseif p === :local_optimizer local_optimizer!(o, x) elseif p === :default_initial_step default_initial_step!(o, x) elseif p === :initial_step initial_step!(o, x) elseif p === :min_objective min_objective!(o, x) elseif p === :max_objective max_objective!(o, x) elseif p === :inequality_constraint inequality_constraint!(o, x) elseif p === :equality_constraint equality_constraint!(o, x) else error("type Opt has no writable property $p") end return x end function Base.propertynames(o::Opt) return ( :lower_bounds, :upper_bounds, :stopval, :ftol_rel, :ftol_abs, :xtol_rel, :xtol_abs, :maxeval, :maxtime, :force_stop, :population, :vector_storage, :initial_step, :algorithm, :local_optimizer, :default_initial_step, :initial_step, :min_objective, :max_objective, :inequality_constraint, :equality_constraint, :numevals, :errmsg, :params, ) end ############################################################################ # Perform the optimization: function optimize!(o::Opt, x::Vector{Cdouble}) if length(x) != ndims(o) throw(BoundsError()) end opt_f = Ref{Cdouble}(NaN) ret::Result = nlopt_optimize(o, x, opt_f) # We do not need to check the value of `ret`, except if it is a FORCED_STOP # with a Julia-related exception from a callback if ret == FORCED_STOP exception = getfield(o, :exception) setfield!(o, :exception, nothing) if exception !== nothing && !(exception isa ForcedStop) throw(exception) end end return opt_f[], x, Symbol(ret) end function optimize(o::Opt, x::AbstractVector{<:Real}) return optimize!(o, copyto!(Array{Cdouble}(undef, length(x)), x)) end export Opt, NLOPT_VERSION, algorithm, algorithm_name, ForcedStop, lower_bounds!, lower_bounds, upper_bounds!, upper_bounds, stopval!, stopval, ftol_rel!, ftol_rel, ftol_abs!, ftol_abs, xtol_rel!, xtol_rel, xtol_abs!, xtol_abs, maxeval!, maxeval, maxtime!, maxtime, force_stop!, force_stop, population!, population, vector_storage!, vector_storage, initial_step!, initial_step, default_initial_step!, local_optimizer!, min_objective!, max_objective!, equality_constraint!, inequality_constraint!, remove_constraints!, optimize!, optimize, Algorithm, Result @static if !isdefined(Base, :get_extension) include("../ext/NLoptMathOptInterfaceExt.jl") using .NLoptMathOptInterfaceExt const Optimizer = NLoptMathOptInterfaceExt.Optimizer else # declare this upfront so that the MathOptInterface extension can assign it # without creating a new global global Optimizer end end # module
NLopt
https://github.com/jump-dev/NLopt.jl.git
[ "MIT" ]
1.1.1
81a321298aed95631447a1f3afc2ea83682d44a4
code
15792
# Copyright (c) 2013: Steven G. Johnson and contributors # # Use of this source code is governed by an MIT-style license that can be found # in the LICENSE.md file or at https://opensource.org/licenses/MIT. #! format: off using CEnum # typedef double ( * nlopt_func ) ( unsigned n , const double * x , double * gradient , /* NULL if not needed */ void * func_data ) const nlopt_func = Ptr{Cvoid} # typedef void ( * nlopt_mfunc ) ( unsigned m , double * result , unsigned n , const double * x , double * gradient , /* NULL if not needed */ void * func_data ) const nlopt_mfunc = Ptr{Cvoid} # typedef void ( * nlopt_precond ) ( unsigned n , const double * x , const double * v , double * vpre , void * data ) const nlopt_precond = Ptr{Cvoid} @cenum nlopt_algorithm::UInt32 begin NLOPT_GN_DIRECT = 0 NLOPT_GN_DIRECT_L = 1 NLOPT_GN_DIRECT_L_RAND = 2 NLOPT_GN_DIRECT_NOSCAL = 3 NLOPT_GN_DIRECT_L_NOSCAL = 4 NLOPT_GN_DIRECT_L_RAND_NOSCAL = 5 NLOPT_GN_ORIG_DIRECT = 6 NLOPT_GN_ORIG_DIRECT_L = 7 NLOPT_GD_STOGO = 8 NLOPT_GD_STOGO_RAND = 9 NLOPT_LD_LBFGS_NOCEDAL = 10 NLOPT_LD_LBFGS = 11 NLOPT_LN_PRAXIS = 12 NLOPT_LD_VAR1 = 13 NLOPT_LD_VAR2 = 14 NLOPT_LD_TNEWTON = 15 NLOPT_LD_TNEWTON_RESTART = 16 NLOPT_LD_TNEWTON_PRECOND = 17 NLOPT_LD_TNEWTON_PRECOND_RESTART = 18 NLOPT_GN_CRS2_LM = 19 NLOPT_GN_MLSL = 20 NLOPT_GD_MLSL = 21 NLOPT_GN_MLSL_LDS = 22 NLOPT_GD_MLSL_LDS = 23 NLOPT_LD_MMA = 24 NLOPT_LN_COBYLA = 25 NLOPT_LN_NEWUOA = 26 NLOPT_LN_NEWUOA_BOUND = 27 NLOPT_LN_NELDERMEAD = 28 NLOPT_LN_SBPLX = 29 NLOPT_LN_AUGLAG = 30 NLOPT_LD_AUGLAG = 31 NLOPT_LN_AUGLAG_EQ = 32 NLOPT_LD_AUGLAG_EQ = 33 NLOPT_LN_BOBYQA = 34 NLOPT_GN_ISRES = 35 NLOPT_AUGLAG = 36 NLOPT_AUGLAG_EQ = 37 NLOPT_G_MLSL = 38 NLOPT_G_MLSL_LDS = 39 NLOPT_LD_SLSQP = 40 NLOPT_LD_CCSAQ = 41 NLOPT_GN_ESCH = 42 NLOPT_GN_AGS = 43 NLOPT_NUM_ALGORITHMS = 44 end function nlopt_algorithm_name(a) ccall((:nlopt_algorithm_name, libnlopt), Ptr{Cchar}, (nlopt_algorithm,), a) end function nlopt_algorithm_to_string(algorithm) ccall((:nlopt_algorithm_to_string, libnlopt), Ptr{Cchar}, (nlopt_algorithm,), algorithm) end function nlopt_algorithm_from_string(name) ccall((:nlopt_algorithm_from_string, libnlopt), nlopt_algorithm, (Ptr{Cchar},), name) end @cenum nlopt_result::Int32 begin NLOPT_FAILURE = -1 NLOPT_INVALID_ARGS = -2 NLOPT_OUT_OF_MEMORY = -3 NLOPT_ROUNDOFF_LIMITED = -4 NLOPT_FORCED_STOP = -5 NLOPT_NUM_FAILURES = -6 NLOPT_SUCCESS = 1 NLOPT_STOPVAL_REACHED = 2 NLOPT_FTOL_REACHED = 3 NLOPT_XTOL_REACHED = 4 NLOPT_MAXEVAL_REACHED = 5 NLOPT_MAXTIME_REACHED = 6 NLOPT_NUM_RESULTS = 7 end function nlopt_result_to_string(algorithm) ccall((:nlopt_result_to_string, libnlopt), Ptr{Cchar}, (nlopt_result,), algorithm) end function nlopt_result_from_string(name) ccall((:nlopt_result_from_string, libnlopt), nlopt_result, (Ptr{Cchar},), name) end function nlopt_srand(seed) ccall((:nlopt_srand, libnlopt), Cvoid, (Culong,), seed) end function nlopt_srand_time() ccall((:nlopt_srand_time, libnlopt), Cvoid, ()) end function nlopt_version(major, minor, bugfix) ccall((:nlopt_version, libnlopt), Cvoid, (Ptr{Cint}, Ptr{Cint}, Ptr{Cint}), major, minor, bugfix) end mutable struct nlopt_opt_s end const nlopt_opt = Ptr{Cvoid} function nlopt_create(algorithm, n) ccall((:nlopt_create, libnlopt), nlopt_opt, (nlopt_algorithm, Cuint), algorithm, n) end function nlopt_destroy(opt) ccall((:nlopt_destroy, libnlopt), Cvoid, (nlopt_opt,), opt) end function nlopt_copy(opt) ccall((:nlopt_copy, libnlopt), nlopt_opt, (nlopt_opt,), opt) end function nlopt_optimize(opt, x, opt_f) ccall((:nlopt_optimize, libnlopt), nlopt_result, (nlopt_opt, Ptr{Cdouble}, Ptr{Cdouble}), opt, x, opt_f) end function nlopt_set_min_objective(opt, f, f_data) ccall((:nlopt_set_min_objective, libnlopt), nlopt_result, (nlopt_opt, nlopt_func, Ptr{Cvoid}), opt, f, f_data) end function nlopt_set_max_objective(opt, f, f_data) ccall((:nlopt_set_max_objective, libnlopt), nlopt_result, (nlopt_opt, nlopt_func, Ptr{Cvoid}), opt, f, f_data) end function nlopt_set_precond_min_objective(opt, f, pre, f_data) ccall((:nlopt_set_precond_min_objective, libnlopt), nlopt_result, (nlopt_opt, nlopt_func, nlopt_precond, Ptr{Cvoid}), opt, f, pre, f_data) end function nlopt_set_precond_max_objective(opt, f, pre, f_data) ccall((:nlopt_set_precond_max_objective, libnlopt), nlopt_result, (nlopt_opt, nlopt_func, nlopt_precond, Ptr{Cvoid}), opt, f, pre, f_data) end function nlopt_get_algorithm(opt) ccall((:nlopt_get_algorithm, libnlopt), nlopt_algorithm, (nlopt_opt,), opt) end function nlopt_get_dimension(opt) ccall((:nlopt_get_dimension, libnlopt), Cuint, (nlopt_opt,), opt) end function nlopt_get_errmsg(opt) ccall((:nlopt_get_errmsg, libnlopt), Ptr{Cchar}, (nlopt_opt,), opt) end function nlopt_set_param(opt, name, val) ccall((:nlopt_set_param, libnlopt), nlopt_result, (nlopt_opt, Ptr{Cchar}, Cdouble), opt, name, val) end function nlopt_get_param(opt, name, defaultval) ccall((:nlopt_get_param, libnlopt), Cdouble, (nlopt_opt, Ptr{Cchar}, Cdouble), opt, name, defaultval) end function nlopt_has_param(opt, name) ccall((:nlopt_has_param, libnlopt), Cint, (nlopt_opt, Ptr{Cchar}), opt, name) end function nlopt_num_params(opt) ccall((:nlopt_num_params, libnlopt), Cuint, (nlopt_opt,), opt) end function nlopt_nth_param(opt, n) ccall((:nlopt_nth_param, libnlopt), Ptr{Cchar}, (nlopt_opt, Cuint), opt, n) end function nlopt_set_lower_bounds(opt, lb) ccall((:nlopt_set_lower_bounds, libnlopt), nlopt_result, (nlopt_opt, Ptr{Cdouble}), opt, lb) end function nlopt_set_lower_bounds1(opt, lb) ccall((:nlopt_set_lower_bounds1, libnlopt), nlopt_result, (nlopt_opt, Cdouble), opt, lb) end function nlopt_set_lower_bound(opt, i, lb) ccall((:nlopt_set_lower_bound, libnlopt), nlopt_result, (nlopt_opt, Cint, Cdouble), opt, i, lb) end function nlopt_get_lower_bounds(opt, lb) ccall((:nlopt_get_lower_bounds, libnlopt), nlopt_result, (nlopt_opt, Ptr{Cdouble}), opt, lb) end function nlopt_set_upper_bounds(opt, ub) ccall((:nlopt_set_upper_bounds, libnlopt), nlopt_result, (nlopt_opt, Ptr{Cdouble}), opt, ub) end function nlopt_set_upper_bounds1(opt, ub) ccall((:nlopt_set_upper_bounds1, libnlopt), nlopt_result, (nlopt_opt, Cdouble), opt, ub) end function nlopt_set_upper_bound(opt, i, ub) ccall((:nlopt_set_upper_bound, libnlopt), nlopt_result, (nlopt_opt, Cint, Cdouble), opt, i, ub) end function nlopt_get_upper_bounds(opt, ub) ccall((:nlopt_get_upper_bounds, libnlopt), nlopt_result, (nlopt_opt, Ptr{Cdouble}), opt, ub) end function nlopt_remove_inequality_constraints(opt) ccall((:nlopt_remove_inequality_constraints, libnlopt), nlopt_result, (nlopt_opt,), opt) end function nlopt_add_inequality_constraint(opt, fc, fc_data, tol) ccall((:nlopt_add_inequality_constraint, libnlopt), nlopt_result, (nlopt_opt, nlopt_func, Ptr{Cvoid}, Cdouble), opt, fc, fc_data, tol) end function nlopt_add_precond_inequality_constraint(opt, fc, pre, fc_data, tol) ccall((:nlopt_add_precond_inequality_constraint, libnlopt), nlopt_result, (nlopt_opt, nlopt_func, nlopt_precond, Ptr{Cvoid}, Cdouble), opt, fc, pre, fc_data, tol) end function nlopt_add_inequality_mconstraint(opt, m, fc, fc_data, tol) ccall((:nlopt_add_inequality_mconstraint, libnlopt), nlopt_result, (nlopt_opt, Cuint, nlopt_mfunc, Ptr{Cvoid}, Ptr{Cdouble}), opt, m, fc, fc_data, tol) end function nlopt_remove_equality_constraints(opt) ccall((:nlopt_remove_equality_constraints, libnlopt), nlopt_result, (nlopt_opt,), opt) end function nlopt_add_equality_constraint(opt, h, h_data, tol) ccall((:nlopt_add_equality_constraint, libnlopt), nlopt_result, (nlopt_opt, nlopt_func, Ptr{Cvoid}, Cdouble), opt, h, h_data, tol) end function nlopt_add_precond_equality_constraint(opt, h, pre, h_data, tol) ccall((:nlopt_add_precond_equality_constraint, libnlopt), nlopt_result, (nlopt_opt, nlopt_func, nlopt_precond, Ptr{Cvoid}, Cdouble), opt, h, pre, h_data, tol) end function nlopt_add_equality_mconstraint(opt, m, h, h_data, tol) ccall((:nlopt_add_equality_mconstraint, libnlopt), nlopt_result, (nlopt_opt, Cuint, nlopt_mfunc, Ptr{Cvoid}, Ptr{Cdouble}), opt, m, h, h_data, tol) end function nlopt_set_stopval(opt, stopval) ccall((:nlopt_set_stopval, libnlopt), nlopt_result, (nlopt_opt, Cdouble), opt, stopval) end function nlopt_get_stopval(opt) ccall((:nlopt_get_stopval, libnlopt), Cdouble, (nlopt_opt,), opt) end function nlopt_set_ftol_rel(opt, tol) ccall((:nlopt_set_ftol_rel, libnlopt), nlopt_result, (nlopt_opt, Cdouble), opt, tol) end function nlopt_get_ftol_rel(opt) ccall((:nlopt_get_ftol_rel, libnlopt), Cdouble, (nlopt_opt,), opt) end function nlopt_set_ftol_abs(opt, tol) ccall((:nlopt_set_ftol_abs, libnlopt), nlopt_result, (nlopt_opt, Cdouble), opt, tol) end function nlopt_get_ftol_abs(opt) ccall((:nlopt_get_ftol_abs, libnlopt), Cdouble, (nlopt_opt,), opt) end function nlopt_set_xtol_rel(opt, tol) ccall((:nlopt_set_xtol_rel, libnlopt), nlopt_result, (nlopt_opt, Cdouble), opt, tol) end function nlopt_get_xtol_rel(opt) ccall((:nlopt_get_xtol_rel, libnlopt), Cdouble, (nlopt_opt,), opt) end function nlopt_set_xtol_abs1(opt, tol) ccall((:nlopt_set_xtol_abs1, libnlopt), nlopt_result, (nlopt_opt, Cdouble), opt, tol) end function nlopt_set_xtol_abs(opt, tol) ccall((:nlopt_set_xtol_abs, libnlopt), nlopt_result, (nlopt_opt, Ptr{Cdouble}), opt, tol) end function nlopt_get_xtol_abs(opt, tol) ccall((:nlopt_get_xtol_abs, libnlopt), nlopt_result, (nlopt_opt, Ptr{Cdouble}), opt, tol) end function nlopt_set_x_weights1(opt, w) ccall((:nlopt_set_x_weights1, libnlopt), nlopt_result, (nlopt_opt, Cdouble), opt, w) end function nlopt_set_x_weights(opt, w) ccall((:nlopt_set_x_weights, libnlopt), nlopt_result, (nlopt_opt, Ptr{Cdouble}), opt, w) end function nlopt_get_x_weights(opt, w) ccall((:nlopt_get_x_weights, libnlopt), nlopt_result, (nlopt_opt, Ptr{Cdouble}), opt, w) end function nlopt_set_maxeval(opt, maxeval) ccall((:nlopt_set_maxeval, libnlopt), nlopt_result, (nlopt_opt, Cint), opt, maxeval) end function nlopt_get_maxeval(opt) ccall((:nlopt_get_maxeval, libnlopt), Cint, (nlopt_opt,), opt) end function nlopt_get_numevals(opt) ccall((:nlopt_get_numevals, libnlopt), Cint, (nlopt_opt,), opt) end function nlopt_set_maxtime(opt, maxtime) ccall((:nlopt_set_maxtime, libnlopt), nlopt_result, (nlopt_opt, Cdouble), opt, maxtime) end function nlopt_get_maxtime(opt) ccall((:nlopt_get_maxtime, libnlopt), Cdouble, (nlopt_opt,), opt) end function nlopt_force_stop(opt) ccall((:nlopt_force_stop, libnlopt), nlopt_result, (nlopt_opt,), opt) end function nlopt_set_force_stop(opt, val) ccall((:nlopt_set_force_stop, libnlopt), nlopt_result, (nlopt_opt, Cint), opt, val) end function nlopt_get_force_stop(opt) ccall((:nlopt_get_force_stop, libnlopt), Cint, (nlopt_opt,), opt) end function nlopt_set_local_optimizer(opt, local_opt) ccall((:nlopt_set_local_optimizer, libnlopt), nlopt_result, (nlopt_opt, nlopt_opt), opt, local_opt) end function nlopt_set_population(opt, pop) ccall((:nlopt_set_population, libnlopt), nlopt_result, (nlopt_opt, Cuint), opt, pop) end function nlopt_get_population(opt) ccall((:nlopt_get_population, libnlopt), Cuint, (nlopt_opt,), opt) end function nlopt_set_vector_storage(opt, dim) ccall((:nlopt_set_vector_storage, libnlopt), nlopt_result, (nlopt_opt, Cuint), opt, dim) end function nlopt_get_vector_storage(opt) ccall((:nlopt_get_vector_storage, libnlopt), Cuint, (nlopt_opt,), opt) end function nlopt_set_default_initial_step(opt, x) ccall((:nlopt_set_default_initial_step, libnlopt), nlopt_result, (nlopt_opt, Ptr{Cdouble}), opt, x) end function nlopt_set_initial_step(opt, dx) ccall((:nlopt_set_initial_step, libnlopt), nlopt_result, (nlopt_opt, Ptr{Cdouble}), opt, dx) end function nlopt_set_initial_step1(opt, dx) ccall((:nlopt_set_initial_step1, libnlopt), nlopt_result, (nlopt_opt, Cdouble), opt, dx) end function nlopt_get_initial_step(opt, x, dx) ccall((:nlopt_get_initial_step, libnlopt), nlopt_result, (nlopt_opt, Ptr{Cdouble}, Ptr{Cdouble}), opt, x, dx) end # typedef void * ( * nlopt_munge ) ( void * p ) const nlopt_munge = Ptr{Cvoid} function nlopt_set_munge(opt, munge_on_destroy, munge_on_copy) ccall((:nlopt_set_munge, libnlopt), Cvoid, (nlopt_opt, nlopt_munge, nlopt_munge), opt, munge_on_destroy, munge_on_copy) end # typedef void * ( * nlopt_munge2 ) ( void * p , void * data ) const nlopt_munge2 = Ptr{Cvoid} function nlopt_munge_data(opt, munge, data) ccall((:nlopt_munge_data, libnlopt), Cvoid, (nlopt_opt, nlopt_munge2, Ptr{Cvoid}), opt, munge, data) end # typedef double ( * nlopt_func_old ) ( int n , const double * x , double * gradient , /* NULL if not needed */ void * func_data ) const nlopt_func_old = Ptr{Cvoid} function nlopt_minimize(algorithm, n, f, f_data, lb, ub, x, minf, minf_max, ftol_rel, ftol_abs, xtol_rel, xtol_abs, maxeval, maxtime) ccall((:nlopt_minimize, libnlopt), nlopt_result, (nlopt_algorithm, Cint, nlopt_func_old, Ptr{Cvoid}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Cdouble, Cdouble, Cdouble, Cdouble, Ptr{Cdouble}, Cint, Cdouble), algorithm, n, f, f_data, lb, ub, x, minf, minf_max, ftol_rel, ftol_abs, xtol_rel, xtol_abs, maxeval, maxtime) end function nlopt_minimize_constrained(algorithm, n, f, f_data, m, fc, fc_data, fc_datum_size, lb, ub, x, minf, minf_max, ftol_rel, ftol_abs, xtol_rel, xtol_abs, maxeval, maxtime) ccall((:nlopt_minimize_constrained, libnlopt), nlopt_result, (nlopt_algorithm, Cint, nlopt_func_old, Ptr{Cvoid}, Cint, nlopt_func_old, Ptr{Cvoid}, Cptrdiff_t, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Cdouble, Cdouble, Cdouble, Cdouble, Ptr{Cdouble}, Cint, Cdouble), algorithm, n, f, f_data, m, fc, fc_data, fc_datum_size, lb, ub, x, minf, minf_max, ftol_rel, ftol_abs, xtol_rel, xtol_abs, maxeval, maxtime) end function nlopt_minimize_econstrained(algorithm, n, f, f_data, m, fc, fc_data, fc_datum_size, p, h, h_data, h_datum_size, lb, ub, x, minf, minf_max, ftol_rel, ftol_abs, xtol_rel, xtol_abs, htol_rel, htol_abs, maxeval, maxtime) ccall((:nlopt_minimize_econstrained, libnlopt), nlopt_result, (nlopt_algorithm, Cint, nlopt_func_old, Ptr{Cvoid}, Cint, nlopt_func_old, Ptr{Cvoid}, Cptrdiff_t, Cint, nlopt_func_old, Ptr{Cvoid}, Cptrdiff_t, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Cdouble, Cdouble, Cdouble, Cdouble, Ptr{Cdouble}, Cdouble, Cdouble, Cint, Cdouble), algorithm, n, f, f_data, m, fc, fc_data, fc_datum_size, p, h, h_data, h_datum_size, lb, ub, x, minf, minf_max, ftol_rel, ftol_abs, xtol_rel, xtol_abs, htol_rel, htol_abs, maxeval, maxtime) end function nlopt_get_local_search_algorithm(deriv, nonderiv, maxeval) ccall((:nlopt_get_local_search_algorithm, libnlopt), Cvoid, (Ptr{nlopt_algorithm}, Ptr{nlopt_algorithm}, Ptr{Cint}), deriv, nonderiv, maxeval) end function nlopt_set_local_search_algorithm(deriv, nonderiv, maxeval) ccall((:nlopt_set_local_search_algorithm, libnlopt), Cvoid, (nlopt_algorithm, nlopt_algorithm, Cint), deriv, nonderiv, maxeval) end function nlopt_get_stochastic_population() ccall((:nlopt_get_stochastic_population, libnlopt), Cint, ()) end function nlopt_set_stochastic_population(pop) ccall((:nlopt_set_stochastic_population, libnlopt), Cvoid, (Cint,), pop) end const NLOPT_MINF_MAX_REACHED = NLOPT_STOPVAL_REACHED # Skipping MacroDefinition: NLOPT_DEPRECATED __attribute__ ( ( deprecated ) )
NLopt
https://github.com/jump-dev/NLopt.jl.git
[ "MIT" ]
1.1.1
81a321298aed95631447a1f3afc2ea83682d44a4
code
20994
# Copyright (c) 2013: Steven G. Johnson and contributors # # Use of this source code is governed by an MIT-style license that can be found # in the LICENSE.md file or at https://opensource.org/licenses/MIT. module TestCAPI using NLopt using Test function runtests() for name in names(@__MODULE__; all = true) if !startswith("$(name)", "test_") continue end @testset "$(name)" begin getfield(@__MODULE__, name)() end end return end function test_readme_example() function my_objective_fn(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = 0 grad[2] = 0.5 / sqrt(x[2]) end return sqrt(x[2]) end function my_constraint_fn(x::Vector, grad::Vector, a, b) if length(grad) > 0 grad[1] = 3 * a * (a * x[1] + b)^2 grad[2] = -1 end return (a * x[1] + b)^3 - x[2] end opt = Opt(:LD_MMA, 2) lower_bounds!(opt, [-Inf, 0.0]) xtol_rel!(opt, 1e-4) min_objective!(opt, my_objective_fn) inequality_constraint!(opt, (x, g) -> my_constraint_fn(x, g, 2, 0), 1e-8) inequality_constraint!(opt, (x, g) -> my_constraint_fn(x, g, -1, 1), 1e-8) min_f, min_x, ret = optimize(opt, [1.234, 5.678]) @test min_f β‰ˆ 0.5443310477213124 @test min_x β‰ˆ [0.3333333342139688, 0.29629628951338166] @test ret == :XTOL_REACHED return end function test_readme_example_vector() function my_objective_fn(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = 0 grad[2] = 0.5 / sqrt(x[2]) end return sqrt(x[2]) end function my_constraint_fn(ret, x::Vector, grad::Matrix) if length(grad) > 0 grad[1, 1] = 3 * 2 * (2 * x[1] + 0)^2 grad[2, 1] = -1 grad[1, 2] = 3 * -1 * (-1 * x[1] + 1)^2 grad[2, 2] = -1 end ret[1] = (2 * x[1] + 0)^3 - x[2] ret[2] = (-1 * x[1] + 1)^3 - x[2] return end opt = Opt(:LD_MMA, 2) lower_bounds!(opt, [-Inf, 0.0]) xtol_rel!(opt, 1e-4) min_objective!(opt, my_objective_fn) inequality_constraint!(opt, my_constraint_fn, [1e-8, 1e-8]) min_f, min_x, ret = optimize(opt, [1.234, 5.678]) @test min_f β‰ˆ 0.5443310477213124 @test min_x β‰ˆ [0.3333333342139688, 0.29629628951338166] @test ret == :XTOL_REACHED return end function test_readme_example_vector_real_tol() function my_objective_fn(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = 0 grad[2] = 0.5 / sqrt(x[2]) end return sqrt(x[2]) end function my_constraint_fn(ret, x::Vector, grad::Matrix) if length(grad) > 0 grad[1, 1] = 3 * 2 * (2 * x[1] + 0)^2 grad[2, 1] = -1 grad[1, 2] = 3 * -1 * (-1 * x[1] + 1)^2 grad[2, 2] = -1 end ret[1] = (2 * x[1] + 0)^3 - x[2] ret[2] = (-1 * x[1] + 1)^3 - x[2] return end opt = Opt(:LD_MMA, 2) lower_bounds!(opt, [-Inf, 0.0]) xtol_rel!(opt, 1e-4) min_objective!(opt, my_objective_fn) tol = [1 // 1_000_000, 1 // 1_000_000] inequality_constraint!(opt, my_constraint_fn, tol) min_f, min_x, ret = optimize(opt, [1.234, 5.678]) @test min_f β‰ˆ 0.5443310477213124 @test min_x β‰ˆ [0.3333333342139688, 0.29629628951338166] @test ret == :XTOL_REACHED return end function test_vector_equality_constraint_real_tol() function my_objective_fn(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = 0 grad[2] = 0.5 / sqrt(x[2]) end return sqrt(x[2]) end function my_constraint_fn(ret, x::Vector, grad::Matrix) if length(grad) > 0 grad[1, 1] = 3 * 2 * (2 * x[1] + 0)^2 grad[2, 1] = -1 grad[1, 2] = 3 * -1 * (-1 * x[1] + 1)^2 grad[2, 2] = -1 end ret[1] = (2 * x[1] + 0)^3 - x[2] ret[2] = (-1 * x[1] + 1)^3 - x[2] return end opt = Opt(:LD_SLSQP, 2) lower_bounds!(opt, [-Inf, 0.0]) xtol_rel!(opt, 1e-4) min_objective!(opt, my_objective_fn) tol = [1 // 1_000_000, 1 // 1_000_000] equality_constraint!(opt, my_constraint_fn, tol) min_f, min_x, ret = optimize(opt, [1.234, 5.678]) @test min_f β‰ˆ 0.5443310477213124 @test min_x β‰ˆ [0.3333333342139688, 0.29629628951338166] @test ret == :XTOL_REACHED return end function test_vector_equality_constraint_scalar_tol() function my_objective_fn(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = 0 grad[2] = 0.5 / sqrt(x[2]) end return sqrt(x[2]) end function my_constraint_fn(ret, x::Vector, grad::Matrix) if length(grad) > 0 grad[1, 1] = 3 * 2 * (2 * x[1] + 0)^2 grad[2, 1] = -1 grad[1, 2] = 3 * -1 * (-1 * x[1] + 1)^2 grad[2, 2] = -1 end ret[1] = (2 * x[1] + 0)^3 - x[2] ret[2] = (-1 * x[1] + 1)^3 - x[2] return end opt = Opt(:LD_SLSQP, 2) lower_bounds!(opt, [-Inf, 0.0]) xtol_rel!(opt, 1e-4) min_objective!(opt, my_objective_fn) equality_constraint!(opt, 2, my_constraint_fn, 1 // 1_000_000) min_f, min_x, ret = optimize(opt, [1.234, 5.678]) @test min_f β‰ˆ 0.5443310477213124 @test min_x β‰ˆ [0.3333333342139688, 0.29629628951338166] @test ret == :XTOL_REACHED return end function test_vector_forced_stop() function my_objective_fn(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = 0 grad[2] = 0.5 / sqrt(x[2]) end return sqrt(x[2]) end function my_constraint_fn(ret, x::Vector, grad::Matrix) throw(NLopt.ForcedStop()) return end opt = Opt(:LD_MMA, 2) lower_bounds!(opt, [-Inf, 0.0]) xtol_rel!(opt, 1e-4) min_objective!(opt, my_objective_fn) inequality_constraint!(opt, my_constraint_fn, [1e-8, 1e-8]) min_f, min_x, ret = optimize(opt, [1.234, 5.678]) @test ret == :FORCED_STOP return end function test_vector_interrupt_exception() function my_objective_fn(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = 0 grad[2] = 0.5 / sqrt(x[2]) end return sqrt(x[2]) end function my_constraint_fn(ret, x::Vector, grad::Matrix) throw(NLopt.InterruptException()) return end opt = Opt(:LD_MMA, 2) lower_bounds!(opt, [-Inf, 0.0]) xtol_rel!(opt, 1e-4) min_objective!(opt, my_objective_fn) inequality_constraint!(opt, my_constraint_fn, [1e-8, 1e-8]) min_f, min_x, ret = optimize(opt, [1.234, 5.678]) @test ret == :FORCED_STOP return end function test_max_objective() opt = Opt(:LD_MMA, 2) function objective_fn(x, grad) if length(grad) > 0 grad[1] = -2 * (x[1] - 0.5) grad[2] = -2 * (x[2] - 1) end return -((x[1] - 0.5)^2 + (x[2] - 1)^2) end xtol_abs!(opt, 1e-5) lower_bounds!(opt, -1) upper_bounds!(opt, 2) opt.max_objective = objective_fn minf, minx, ret = optimize(opt, [0.0, 0.0]) @test β‰ˆ(minx, [0.5, 1.0]; atol = 1e-4) max_objective!(opt, objective_fn) minf, minx, ret = optimize(opt, [0.0, 0.0]) @test β‰ˆ(minx, [0.5, 1.0]; atol = 1e-4) return end function test_min_objective() opt = Opt(:LD_MMA, 2) function objective_fn(x, grad) if length(grad) > 0 grad[1] = 2 * (x[1] - 0.5) grad[2] = 2 * (x[2] - 1) end return (x[1] - 0.5)^2 + (x[2] - 1)^2 end xtol_abs!(opt, 1e-5) lower_bounds!(opt, -1) upper_bounds!(opt, 2) opt.min_objective = objective_fn minf, minx, ret = optimize(opt, [0.0, 0.0]) @test β‰ˆ(minx, [0.5, 1.0]; atol = 1e-4) min_objective!(opt, objective_fn) minf, minx, ret = optimize(opt, [0.0, 0.0]) @test β‰ˆ(minx, [0.5, 1.0]; atol = 1e-4) return end function test_issue_163() opt = Opt(:LN_COBYLA, 2) opt.min_objective = (x, g) -> sum(x .^ 2) inequality_constraint!(opt, 2, (result, x, g) -> (result .= 1 .- x)) (minf, minx, ret) = optimize(opt, [2.0, 2.0]) @test minx β‰ˆ [1.0, 1.0] return end function test_issue_132() opt = Opt(:LN_COBYLA, 2) err = ErrorException( "Getting `initial_step` is unsupported. Use " * "`initial_step(opt, x)` to access the initial step at a point `x`.", ) @test_throws err opt.initial_step return end function test_issue_156_CapturedException() f(x, g = []) = (error("test error"); x[1]^2) opt = Opt(:LN_SBPLX, 1) opt.min_objective = f @test_throws CapturedException optimize(opt, [0.1234]) @test getfield(opt, :exception) === nothing try optimize(opt, [0.1234]) catch e # Check that the backtrace is being printed @test length(sprint(show, e)) > 100 end return end function test_issue_156_ForcedStop() f(x, g = []) = (throw(NLopt.ForcedStop()); x[1]^2) opt = Opt(:LN_SBPLX, 1) opt.min_objective = f fmin, xmin, ret = optimize(opt, [0.1234]) @test ret == :FORCED_STOP @test getfield(opt, :exception) === nothing return end function test_issue_156_no_error() f(x, g = []) = (x[1]^2) opt = Opt(:LN_SBPLX, 1) opt.min_objective = f fmin, xmin, ret = optimize(opt, [0.1234]) @test ret ∈ (:SUCCESS, :FTOL_REACHED, :XTOL_REACHED) @test getfield(opt, :exception) === nothing return end function test_invalid_algorithms() @test_throws ArgumentError("unknown algorithm: BILL") Algorithm(:BILL) @test_throws ArgumentError("unknown algorithm: BILL") Opt(:BILL, 420) return end function test_issue_133() function rosenbrock(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = -400 * x[1] * (x[2] - x[1]^2) - 2 * (1 - x[1]) grad[2] = 200 * (x[2] - x[1]^2) end return (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 end function ineq01(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = 1 grad[2] = 2 end return x[1] + 2 * x[2] - 1 end function ineq02(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = 2 * x[1] grad[2] = 1 end return x[1]^2 + x[2] - 1 end function ineq03(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = 2 * x[1] grad[2] = -1 end return x[1]^2 - x[2] - 1 end function eq01(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = 2 grad[2] = 1 end return 2 * x[1] + x[2] - 1 end opt = Opt(:LD_SLSQP, 2) opt.lower_bounds = [0, -0.5] opt.upper_bounds = [1, 2] opt.xtol_rel = 1e-21 opt.min_objective = rosenbrock opt.inequality_constraint = ineq01 opt.inequality_constraint = ineq02 opt.inequality_constraint = ineq03 opt.equality_constraint = eq01 (minf, minx, ret) = optimize(opt, [0.5, 0]) println("got $minf at $minx with constraints (returned $ret)") @test minx[1] β‰ˆ 0.4149 rtol = 1e-3 @test minx[2] β‰ˆ 0.1701 rtol = 1e-3 remove_constraints!(opt) (minf, minx, ret) = optimize(opt, [0.5, 0]) println("got $minf at $minx after removing constraints (returned $ret)") @test minx[1] β‰ˆ 1 rtol = 1e-5 @test minx[2] β‰ˆ 1 rtol = 1e-5 return end function test_tutorial() count = 0 # keep track of # function evaluations function myfunc(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = 0 grad[2] = 0.5 / sqrt(x[2]) end count::Int += 1 println("f_$count($x)") return sqrt(x[2]) end function myconstraint(x::Vector, grad::Vector, a, b) if length(grad) > 0 grad[1] = 3a * (a * x[1] + b)^2 grad[2] = -1 end return (a * x[1] + b)^3 - x[2] end opt = Opt(:LD_MMA, 2) opt.lower_bounds = [-Inf, 0.0] opt.xtol_rel = 1e-4 opt.min_objective = myfunc opt.inequality_constraint = (x, g) -> myconstraint(x, g, 2, 0) opt.inequality_constraint = (x, g) -> myconstraint(x, g, -1, 1) # test algorithm-parameter API opt.params["verbosity"] = 0 opt.params["inner_maxeval"] = 10 opt.params["dual_alg"] = NLopt.LD_MMA @test opt.params == Dict( "verbosity" => 0, "inner_maxeval" => 10, "dual_alg" => Int(NLopt.LD_MMA), ) @test get(opt.params, "foobar", 3.14159) === 3.14159 (minf, minx, ret) = optimize(opt, [1.234, 5.678]) println("got $minf at $minx after $count iterations (returned $ret)") @test minx[1] β‰ˆ 1 / 3 rtol = 1e-5 @test minx[2] β‰ˆ 8 / 27 rtol = 1e-5 @test minf β‰ˆ sqrt(8 / 27) rtol = 1e-5 @test ret == :XTOL_REACHED @test opt.numevals == count return end # It's not obvious why this test returns FAILURE. If it breaks in future, look # for something else. function test_return_FAILURE_from_optimize() function objective_fn(x, grad) if length(grad) > 0 grad[1] = -2 * (1 - x[1]) - 400 * x[1] * (x[2] - x[1]^2) grad[2] = 200 * (x[2] - x[1]^2) end return (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 end function eq_constraint_fn(h, x, J) if length(J) > 0 J[1, 1] = 2x[1] J[2, 1] = 2x[2] end h[1] = x[1]^2 + x[2]^2 - 1.0 return end opt = Opt(:AUGLAG, 2) opt.local_optimizer = Opt(:LD_LBFGS, 2) opt.min_objective = objective_fn equality_constraint!(opt, eq_constraint_fn, [1e-8]) _, _, ret = optimize(opt, [0.5, 0.5]) @test ret == :FAILURE return end function test_optimize!_bounds_error() opt = Opt(:AUGLAG, 2) @test_throws BoundsError optimize!(opt, Cdouble[]) return end function test_property_names() opt = Opt(:AUGLAG, 2) for (key, value) in ( :lower_bounds => [1, 2], :upper_bounds => [2, 3], :stopval => 0.5, :ftol_rel => 0.1, :ftol_abs => 0.2, :xtol_rel => 0.3, :xtol_abs => [0.4, 0.5], # TODO :maxeval => 5, :maxtime => 60.0, :force_stop => 1, :population => 0x00000001, :vector_storage => 0x00000002, ) @test key in propertynames(opt) f = getfield(NLopt, key) @test getproperty(opt, key) == f(opt) setproperty!(opt, key, value) @test f(opt) == value end # Other getters @test :initial_step in propertynames(opt) @test_throws( ErrorException( "Getting `initial_step` is unsupported. Use `initial_step(opt, x)` to access the initial step at a point `x`.", ), opt.initial_step, ) @test :algorithm in propertynames(opt) @test opt.algorithm == algorithm(opt) @test :numevals in propertynames(opt) @test opt.numevals == NLopt.numevals(opt) @test :errmsg in propertynames(opt) @test opt.errmsg == NLopt.errmsg(opt) @test :params in propertynames(opt) @test opt.params == NLopt.OptParams(opt) @test_throws ErrorException("type Opt has no readable property foo") opt.foo @test_throws( ErrorException("type Opt has no writable property foo"), setproperty!(opt, :foo, 1), ) return end function test_get_opt_params_default() opt = Opt(:AUGLAG, 2) @test get(opt.params, "abc", :default) == :default return end function test_srand() @test NLopt.srand(1234) === nothing @test NLopt.srand_time() === nothing return end function test_algorithm() opt = Opt(:LD_LBFGS, 2) @test algorithm(opt) == NLopt.LD_LBFGS return end function test_algorithm_enum() @test convert(Algorithm, NLopt.NLOPT_LD_LBFGS) == NLopt.LD_LBFGS @test convert(NLopt.nlopt_algorithm, NLopt.LD_LBFGS) == NLopt.NLOPT_LD_LBFGS return end function test_result_enum() @test convert(Result, NLopt.NLOPT_SUCCESS) == NLopt.SUCCESS @test convert(NLopt.nlopt_result, NLopt.SUCCESS) == NLopt.NLOPT_SUCCESS return end function test_result_arithmetic() @test !(NLopt.SUCCESS < 0) @test 0 < NLopt.SUCCESS @test NLopt.SUCCESS == :SUCCESS @test :SUCCESS == NLopt.SUCCESS return end function test_opt_argument_error() @test_throws ArgumentError Opt(:LD_LBFGS, -2) return end function test_show_opt() opt = Opt(:LD_LBFGS, 2) @test sprint(show, opt) == "Opt(LD_LBFGS, 2)" return end function test_chk() opt = Opt(:LD_LBFGS, 2) @test NLopt.chk(opt, NLopt.SUCCESS) === nothing @test NLopt.chk(opt, NLopt.ROUNDOFF_LIMITED) === nothing @test_throws ArgumentError NLopt.chk(opt, NLopt.INVALID_ARGS) @test_throws OutOfMemoryError NLopt.chk(opt, NLopt.OUT_OF_MEMORY) @test_throws( ErrorException("nlopt failure FAILURE"), NLopt.chk(opt, NLopt.FAILURE) ) return end function test_algorithm_name() algorithm = NLopt.LD_LBFGS sol = "Limited-memory BFGS (L-BFGS) (local, derivative-based)" @test algorithm_name(algorithm) == sol @test algorithm_name(:LD_LBFGS) == sol @test algorithm_name(11) == sol opt = Opt(:LD_LBFGS, 2) @test algorithm_name(opt) == sol sprint(show, algorithm_name(:LD_LBFGS)) @test sprint(show, MIME("text/plain"), NLopt.LD_LBFGS) == "NLopt.LD_LBFGS: Limited-memory BFGS (L-BFGS) (local, derivative-based)" return end function test_lower_bounds() opt = Opt(:LD_LBFGS, 2) @test_throws BoundsError lower_bounds(opt, Cdouble[]) @test_throws BoundsError lower_bounds!(opt, Cdouble[]) v = [1.0, 2.0] @test lower_bounds(opt, v) === v @test v == [-Inf, -Inf] lower_bounds!(opt, 3) @test lower_bounds(opt) == [3.0, 3.0] lower_bounds!(opt, [1 // 2, 3 // 4]) @test lower_bounds(opt) == [0.5, 0.75] return end function test_upper_bounds() opt = Opt(:LD_LBFGS, 2) @test_throws BoundsError upper_bounds(opt, Cdouble[]) @test_throws BoundsError upper_bounds!(opt, Cdouble[]) v = [1.0, 2.0] @test upper_bounds(opt, v) === v @test v == [Inf, Inf] upper_bounds!(opt, 3) @test upper_bounds(opt) == [3.0, 3.0] upper_bounds!(opt, [1 // 2, 3 // 4]) @test upper_bounds(opt) == [0.5, 0.75] return end function test_xtol_abs() opt = Opt(:LD_LBFGS, 2) @test_throws BoundsError xtol_abs(opt, Cdouble[]) @test_throws BoundsError xtol_abs!(opt, Cdouble[]) v = [1.0, 2.0] @test xtol_abs(opt, v) === v @test v == [0.0, 0.0] xtol_abs!(opt, 3) @test xtol_abs(opt) == [3.0, 3.0] xtol_abs!(opt, [1 // 2, 3 // 4]) @test xtol_abs(opt) == [0.5, 0.75] return end function test_initial_step() opt = Opt(:LD_LBFGS, 2) @test_throws BoundsError default_initial_step!(opt, Cdouble[]) @test_throws BoundsError initial_step!(opt, Cdouble[]) x = [1.0, 2.0] dx = [NaN, NaN] default_initial_step!(opt, [0.2, 0.4]) @test initial_step(opt, x, dx) == [0.2, 0.4] opt.default_initial_step = [0.25, 0.45] @test initial_step(opt, x, dx) == [0.25, 0.45] default_initial_step!(opt, [1 // 2, 3 // 4]) @test initial_step(opt, x, dx) == [0.5, 0.75] @test_throws BoundsError initial_step(opt, x, Cdouble[]) @test_throws BoundsError initial_step(opt, Cdouble[], dx) default_initial_step!(opt, x) @test initial_step(opt, x, dx) == [1.0, 2.0] @test dx == [1.0, 2.0] @test initial_step(opt, [1 // 1, 2 // 1]) == [1.0, 2.0] initial_step!(opt, [0.1, 0.2]) @test initial_step(opt, x, dx) == [0.1, 0.2] opt.initial_step = [0.15, 0.25] @test initial_step(opt, x, dx) == [0.15, 0.25] initial_step!(opt, [2 // 10, 3 // 10]) @test initial_step(opt, x, dx) == [0.2, 0.3] initial_step!(opt, 1 // 2) @test initial_step(opt, x, dx) == [0.5, 0.5] return end function test_copy() function my_objective_fn(x::Vector, grad::Vector) if length(grad) > 0 grad[1] = 0 grad[2] = 0.5 / sqrt(x[2]) end return sqrt(x[2]) end function my_constraint_fn(x::Vector, grad::Vector, a, b) if length(grad) > 0 grad[1] = 3 * a * (a * x[1] + b)^2 grad[2] = -1 end return (a * x[1] + b)^3 - x[2] end opt = Opt(:LD_MMA, 2) lower_bounds!(opt, [-Inf, 0.0]) xtol_rel!(opt, 1e-4) min_objective!(opt, my_objective_fn) inequality_constraint!(opt, (x, g) -> my_constraint_fn(x, g, 2, 0), 1e-8) inequality_constraint!(opt, (x, g) -> my_constraint_fn(x, g, -1, 1), 1e-8) opt_2 = copy(opt) min_f, min_x, ret = optimize(opt_2, [1.234, 5.678]) @test min_f β‰ˆ 0.5443310477213124 @test min_x β‰ˆ [0.3333333342139688, 0.29629628951338166] @test ret == :XTOL_REACHED return end function test_copy_failure() opt = Opt(:LD_MMA, 2) setfield!(opt, :opt, C_NULL) @test_throws ErrorException("Error in nlopt_copy") copy(opt) return end end # module TestCAPI.runtests()
NLopt
https://github.com/jump-dev/NLopt.jl.git